// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/clk/tegra.h>
#include <linux/device.h>
#include <linux/host1x.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <media/v4l2-fwnode.h>

#include "csi.h"
#include "video.h"

#define MHZ 1000000

static inline struct tegra_csi *
host1x_client_to_csi(struct host1x_client *client)
{
	return container_of(client, struct tegra_csi, client);
}

static inline struct tegra_csi_channel *to_csi_chan(struct v4l2_subdev *subdev)
{
	return container_of(subdev, struct tegra_csi_channel, subdev);
}

/*
 * CSI is a separate subdevice which has 6 source pads to generate
 * test patterns. The CSI subdevice pad ops are used only for TPG and
 * allow only the TPG formats below.
 */
static const struct v4l2_mbus_framefmt tegra_csi_tpg_fmts[] = {
	{
		TEGRA_DEF_WIDTH,
		TEGRA_DEF_HEIGHT,
		MEDIA_BUS_FMT_SRGGB10_1X10,
		V4L2_FIELD_NONE,
		V4L2_COLORSPACE_SRGB
	},
	{
		TEGRA_DEF_WIDTH,
		TEGRA_DEF_HEIGHT,
		MEDIA_BUS_FMT_RGB888_1X32_PADHI,
		V4L2_FIELD_NONE,
		V4L2_COLORSPACE_SRGB
	},
};

static const struct v4l2_frmsize_discrete tegra_csi_tpg_sizes[] = {
	{ 1280, 720 },
	{ 1920, 1080 },
	{ 3840, 2160 },
};

/*
 * V4L2 Subdevice Pad Operations
 */
static int csi_enum_bus_code(struct v4l2_subdev *subdev,
			     struct v4l2_subdev_state *sd_state,
			     struct v4l2_subdev_mbus_code_enum *code)
{
	if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
		return -ENOIOCTLCMD;

	if (code->index >= ARRAY_SIZE(tegra_csi_tpg_fmts))
		return -EINVAL;

	code->code = tegra_csi_tpg_fmts[code->index].code;

	return 0;
}

static int csi_get_format(struct v4l2_subdev *subdev,
			  struct v4l2_subdev_state *sd_state,
			  struct v4l2_subdev_format *fmt)
{
	struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);

	if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
		return -ENOIOCTLCMD;

	fmt->format = csi_chan->format;

	return 0;
}

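/*
 * Look up the TPG frame rate table entry matching the given media bus code
 * and frame size; returns the table index or -EINVAL if no entry matches.
 */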
static int csi_get_frmrate_table_index(struct tegra_csi *csi, u32 code,
				       u32 width, u32 height)
{
	const struct tpg_framerate *frmrate;
	unsigned int i;

	frmrate = csi->soc->tpg_frmrate_table;
	for (i = 0; i < csi->soc->tpg_frmrate_table_size; i++) {
		if (frmrate[i].code == code &&
		    frmrate[i].frmsize.width == width &&
		    frmrate[i].frmsize.height == height) {
			return i;
		}
	}

	return -EINVAL;
}

static void csi_chan_update_blank_intervals(struct tegra_csi_channel *csi_chan,
					    u32 code, u32 width, u32 height)
{
	struct tegra_csi *csi = csi_chan->csi;
	const struct tpg_framerate *frmrate = csi->soc->tpg_frmrate_table;
	int index;

	index = csi_get_frmrate_table_index(csi_chan->csi, code,
					    width, height);
	if (index >= 0) {
		csi_chan->h_blank = frmrate[index].h_blank;
		csi_chan->v_blank = frmrate[index].v_blank;
		csi_chan->framerate = frmrate[index].framerate;
	}
}

static int csi_enum_framesizes(struct v4l2_subdev *subdev,
			       struct v4l2_subdev_state *sd_state,
			       struct v4l2_subdev_frame_size_enum *fse)
{
	unsigned int i;

	if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
		return -ENOIOCTLCMD;

	if (fse->index >= ARRAY_SIZE(tegra_csi_tpg_sizes))
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(tegra_csi_tpg_fmts); i++)
		if (fse->code == tegra_csi_tpg_fmts[i].code)
			break;

	if (i == ARRAY_SIZE(tegra_csi_tpg_fmts))
		return -EINVAL;

	fse->min_width = tegra_csi_tpg_sizes[fse->index].width;
	fse->max_width = tegra_csi_tpg_sizes[fse->index].width;
	fse->min_height = tegra_csi_tpg_sizes[fse->index].height;
	fse->max_height = tegra_csi_tpg_sizes[fse->index].height;

	return 0;
}

static int csi_enum_frameintervals(struct v4l2_subdev *subdev,
				   struct v4l2_subdev_state *sd_state,
				   struct v4l2_subdev_frame_interval_enum *fie)
{
	struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
	struct tegra_csi *csi = csi_chan->csi;
	const struct tpg_framerate *frmrate = csi->soc->tpg_frmrate_table;
	int index;

	if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
		return -ENOIOCTLCMD;

	/* one framerate per format and resolution */
	if (fie->index > 0)
		return -EINVAL;

	index = csi_get_frmrate_table_index(csi_chan->csi, fie->code,
					    fie->width, fie->height);
	if (index < 0)
		return -EINVAL;

	fie->interval.numerator = 1;
	fie->interval.denominator = frmrate[index].framerate;

	return 0;
}

static int csi_set_format(struct v4l2_subdev *subdev,
			  struct v4l2_subdev_state *sd_state,
			  struct v4l2_subdev_format *fmt)
{
	struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
	struct v4l2_mbus_framefmt *format = &fmt->format;
	const struct v4l2_frmsize_discrete *sizes;
	unsigned int i;

	if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
		return -ENOIOCTLCMD;

	sizes = v4l2_find_nearest_size(tegra_csi_tpg_sizes,
				       ARRAY_SIZE(tegra_csi_tpg_sizes),
				       width, height,
				       format->width, format->height);
	format->width = sizes->width;
	format->height = sizes->height;

	for (i = 0; i < ARRAY_SIZE(tegra_csi_tpg_fmts); i++)
		if (format->code == tegra_csi_tpg_fmts[i].code)
			break;

	if (i == ARRAY_SIZE(tegra_csi_tpg_fmts))
		i = 0;

	format->code = tegra_csi_tpg_fmts[i].code;
	format->field = V4L2_FIELD_NONE;

	if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
		return 0;

	/* update blanking intervals from frame rate table and format */
	csi_chan_update_blank_intervals(csi_chan, format->code,
					format->width, format->height);
	csi_chan->format = *format;

	return 0;
}

/*
 * V4L2 Subdevice Video Operations
 */
static int tegra_csi_get_frame_interval(struct v4l2_subdev *subdev,
					struct v4l2_subdev_state *sd_state,
					struct v4l2_subdev_frame_interval *vfi)
{
	struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);

	if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
		return -ENOIOCTLCMD;

	/*
	 * FIXME: Implement support for V4L2_SUBDEV_FORMAT_TRY, using the V4L2
	 * subdev active state API.
	 */
	if (vfi->which != V4L2_SUBDEV_FORMAT_ACTIVE)
		return -EINVAL;

	vfi->interval.numerator = 1;
	vfi->interval.denominator = csi_chan->framerate;

	return 0;
}

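/*
 * Query the remote source subdevice's V4L2_CID_PIXEL_RATE control;
 * returns 0 if the sensor does not expose that control.
 */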
static unsigned int csi_get_pixel_rate(struct tegra_csi_channel *csi_chan)
{
	struct tegra_vi_channel *chan;
	struct v4l2_subdev *src_subdev;
	struct v4l2_ctrl *ctrl;

	chan = v4l2_get_subdev_hostdata(&csi_chan->subdev);
	src_subdev = tegra_channel_get_remote_source_subdev(chan);
	ctrl = v4l2_ctrl_find(src_subdev->ctrl_handler, V4L2_CID_PIXEL_RATE);
	if (ctrl)
		return v4l2_ctrl_g_ctrl_int64(ctrl);

	return 0;
}

void tegra_csi_calc_settle_time(struct tegra_csi_channel *csi_chan,
				u8 csi_port_num,
				u8 *clk_settle_time,
				u8 *ths_settle_time)
{
	struct tegra_csi *csi = csi_chan->csi;
	unsigned int cil_clk_mhz;
	unsigned int pix_clk_mhz;
	int clk_idx = (csi_port_num >> 1) + 1;

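	/*
	 * The SoC clock table is laid out with the shared CSI clock first and
	 * one CIL clock per brick (port pair) after it, hence the
	 * (csi_port_num >> 1) + 1 index.
	 */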
	cil_clk_mhz = clk_get_rate(csi->clks[clk_idx].clk) / MHZ;
	pix_clk_mhz = csi_get_pixel_rate(csi_chan) / MHZ;

	/*
	 * CLK settle time is the interval during which the HS receiver should
	 * ignore any clock lane HS transitions, starting from the beginning
	 * of T-CLK-PREPARE.
	 * Per the D-PHY specification, T-CLK-SETTLE should be between
	 * 95 ns and 300 ns:
	 *
	 * 95 ns < (clk-settle-programmed + 7) * lp clk period < 300 ns
	 * midpoint = 197.5 ns
	 */
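	/*
	 * Solving the midpoint for the programmed value gives
	 * clk_settle = (197.5 ns * cil_clk_mhz) / 1000 - 7, which the
	 * integer expression below computes as
	 * ((95 + 300) * cil_clk_mhz - 14000) / 2000.
	 */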
	*clk_settle_time = ((95 + 300) * cil_clk_mhz - 14000) / 2000;

	/*
	 * THS settle time is the interval during which the HS receiver should
	 * ignore any data lane HS transitions, starting from the beginning
	 * of THS-PREPARE.
	 *
	 * Per the D-PHY specification, T-HS-SETTLE should be between
	 * 85 ns + 6 UI and 145 ns + 10 UI:
	 * 85 ns + 6 UI < (ths-settle-programmed + 5) * lp_clk_period < 145 ns + 10 UI
	 * midpoint = 115 ns + 8 UI
	 */
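	/*
	 * The expression below treats 1 UI as 1 / (2 * pix_clk_mhz) us, so the
	 * 8 UI term becomes 8000 * cil_clk_mhz / (2 * pix_clk_mhz) in the same
	 * cil-clock-cycle units, minus the fixed offset of 5 cycles.
	 */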
	if (pix_clk_mhz)
		*ths_settle_time = (115 * cil_clk_mhz + 8000 * cil_clk_mhz
				   / (2 * pix_clk_mhz) - 5000) / 1000;
}

static int tegra_csi_enable_stream(struct v4l2_subdev *subdev)
{
	struct tegra_vi_channel *chan = v4l2_get_subdev_hostdata(subdev);
	struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
	struct tegra_csi *csi = csi_chan->csi;
	int ret, err;

	ret = pm_runtime_resume_and_get(csi->dev);
	if (ret < 0) {
		dev_err(csi->dev, "failed to get runtime PM: %d\n", ret);
		return ret;
	}

	if (csi_chan->mipi) {
		ret = tegra_mipi_enable(csi_chan->mipi);
		if (ret < 0) {
			dev_err(csi->dev,
				"failed to enable MIPI pads: %d\n", ret);
			goto rpm_put;
		}

		/*
		 * CSI MIPI pads PULLUP, PULLDN and TERM impedances need to
		 * be calibrated after power on.
		 * So, trigger the calibration start here; the results will
		 * be latched and applied to the pads when the link is in the
		 * LP11 state at the start of sensor streaming.
		 */
		ret = tegra_mipi_start_calibration(csi_chan->mipi);
		if (ret < 0) {
			dev_err(csi->dev,
				"failed to start MIPI calibration: %d\n", ret);
			goto disable_mipi;
		}
	}

	csi_chan->pg_mode = chan->pg_mode;

	/*
	 * The Tegra CSI receiver can detect the first LP to HS transition.
	 * So, start the CSI stream-on prior to sensor stream-on, and
	 * vice-versa for stream-off.
	 */
	ret = csi->ops->csi_start_streaming(csi_chan);
	if (ret < 0)
		goto finish_calibration;

	if (csi_chan->mipi) {
		struct v4l2_subdev *src_subdev;
		/*
		 * The TRM incorrectly documents waiting for a done status from
		 * the calibration logic after CSI interface power on.
		 * As per the design, calibration results are latched and
		 * applied to the pads only when the link is in the LP11 state,
		 * which happens during the sensor stream-on.
		 * CSI subdev stream-on triggers the start of MIPI pads
		 * calibration. Wait for calibration to finish here, after the
		 * sensor subdev stream-on.
		 */
		src_subdev = tegra_channel_get_remote_source_subdev(chan);
		ret = v4l2_subdev_call(src_subdev, video, s_stream, true);

		if (ret < 0 && ret != -ENOIOCTLCMD)
			goto disable_csi_stream;

		err = tegra_mipi_finish_calibration(csi_chan->mipi);
		if (err < 0)
			dev_warn(csi->dev, "MIPI calibration failed: %d\n", err);
	}

	return 0;

disable_csi_stream:
	csi->ops->csi_stop_streaming(csi_chan);
finish_calibration:
	if (csi_chan->mipi)
		tegra_mipi_finish_calibration(csi_chan->mipi);
disable_mipi:
	if (csi_chan->mipi) {
		err = tegra_mipi_disable(csi_chan->mipi);
		if (err < 0)
			dev_err(csi->dev,
				"failed to disable MIPI pads: %d\n", err);
	}

rpm_put:
	pm_runtime_put(csi->dev);
	return ret;
}

static int tegra_csi_disable_stream(struct v4l2_subdev *subdev)
{
	struct tegra_vi_channel *chan = v4l2_get_subdev_hostdata(subdev);
	struct tegra_csi_channel *csi_chan = to_csi_chan(subdev);
	struct tegra_csi *csi = csi_chan->csi;
	int err;

	/*
	 * Stream off the subdevices in the reverse order of stream-on.
	 * In TPG mode the remote source subdev is the CSI subdev itself.
	 */
	if (csi_chan->mipi) {
		struct v4l2_subdev *src_subdev;

		src_subdev = tegra_channel_get_remote_source_subdev(chan);
		err = v4l2_subdev_call(src_subdev, video, s_stream, false);
		if (err < 0 && err != -ENOIOCTLCMD)
			dev_err_probe(csi->dev, err, "source subdev stream off failed\n");
	}

	csi->ops->csi_stop_streaming(csi_chan);

	if (csi_chan->mipi) {
		err = tegra_mipi_disable(csi_chan->mipi);
		if (err < 0)
			dev_err(csi->dev,
				"failed to disable MIPI pads: %d\n", err);
	}

	pm_runtime_put(csi->dev);

	return 0;
}

static int tegra_csi_s_stream(struct v4l2_subdev *subdev, int enable)
{
	int ret;

	if (enable)
		ret = tegra_csi_enable_stream(subdev);
	else
		ret = tegra_csi_disable_stream(subdev);

	return ret;
}

/*
 * V4L2 Subdevice Operations
 */
static const struct v4l2_subdev_video_ops tegra_csi_video_ops = {
	.s_stream = tegra_csi_s_stream,
};

static const struct v4l2_subdev_pad_ops tegra_csi_pad_ops = {
	.enum_mbus_code = csi_enum_bus_code,
	.enum_frame_size = csi_enum_framesizes,
	.enum_frame_interval = csi_enum_frameintervals,
	.get_fmt = csi_get_format,
	.set_fmt = csi_set_format,
	.get_frame_interval = tegra_csi_get_frame_interval,
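	/* the TPG frame rate is fixed per format and size, so "set" just reports it */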
	.set_frame_interval = tegra_csi_get_frame_interval,
};

static const struct v4l2_subdev_ops tegra_csi_ops = {
	.video = &tegra_csi_video_ops,
	.pad = &tegra_csi_pad_ops,
};

static int tegra_csi_channel_alloc(struct tegra_csi *csi,
				   struct device_node *node,
				   unsigned int port_num, unsigned int lanes,
				   unsigned int num_pads)
{
	struct tegra_csi_channel *chan;
	int ret = 0, i;

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	list_add_tail(&chan->list, &csi->csi_chans);
	chan->csi = csi;
	/*
	 * Each CSI brick has a maximum of 4 lanes.
	 * For more than 4 lanes, gang multiple adjacent CSI bricks.
	 */
	if (lanes <= CSI_LANES_PER_BRICK) {
		chan->numlanes = lanes;
		chan->numgangports = 1;
	} else {
		chan->numlanes = CSI_LANES_PER_BRICK;
		chan->numgangports = lanes / CSI_LANES_PER_BRICK;
	}

	for (i = 0; i < chan->numgangports; i++)
		chan->csi_port_nums[i] = port_num + i * CSI_PORTS_PER_BRICK;

	chan->of_node = of_node_get(node);
	chan->numpads = num_pads;
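	/*
	 * Two endpoints mean a sink pad (from the sensor) plus a source pad
	 * (towards VI); single-pad (TPG) channels expose only a source pad.
	 */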
	if (num_pads & 0x2) {
		chan->pads[0].flags = MEDIA_PAD_FL_SINK;
		chan->pads[1].flags = MEDIA_PAD_FL_SOURCE;
	} else {
		chan->pads[0].flags = MEDIA_PAD_FL_SOURCE;
	}

	if (IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
		return 0;

	chan->mipi = tegra_mipi_request(csi->dev, node);
	if (IS_ERR(chan->mipi)) {
		ret = PTR_ERR(chan->mipi);
		chan->mipi = NULL;
		dev_err(csi->dev, "failed to get mipi device: %d\n", ret);
	}

	return ret;
}

static int tegra_csi_tpg_channels_alloc(struct tegra_csi *csi)
{
	struct device_node *node = csi->dev->of_node;
	unsigned int port_num;
	unsigned int tpg_channels = csi->soc->csi_max_channels;
	int ret;

	/* allocate a two-lane, single source pad CSI channel per port for TPG */
	for (port_num = 0; port_num < tpg_channels; port_num++) {
		ret = tegra_csi_channel_alloc(csi, node, port_num, 2, 1);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int tegra_csi_channels_alloc(struct tegra_csi *csi)
{
	struct device_node *node = csi->dev->of_node;
	struct v4l2_fwnode_endpoint v4l2_ep = {
		.bus_type = V4L2_MBUS_CSI2_DPHY
	};
	struct fwnode_handle *fwh;
	struct device_node *channel;
	struct device_node *ep;
	unsigned int lanes, portno, num_pads;
	int ret;

	for_each_child_of_node(node, channel) {
		if (!of_node_name_eq(channel, "channel"))
			continue;

		ret = of_property_read_u32(channel, "reg", &portno);
		if (ret < 0)
			continue;

		if (portno >= csi->soc->csi_max_channels) {
			dev_err(csi->dev, "invalid port num %d for %pOF\n",
				portno, channel);
			ret = -EINVAL;
			goto err_node_put;
		}

		ep = of_graph_get_endpoint_by_regs(channel, 0, 0);
		if (!ep)
			continue;

		fwh = of_fwnode_handle(ep);
		ret = v4l2_fwnode_endpoint_parse(fwh, &v4l2_ep);
		of_node_put(ep);
		if (ret) {
			dev_err(csi->dev,
				"failed to parse v4l2 endpoint for %pOF: %d\n",
				channel, ret);
			goto err_node_put;
		}

		lanes = v4l2_ep.bus.mipi_csi2.num_data_lanes;
		/*
		 * Each CSI brick has a maximum of 4 data lanes.
		 * For more than 4 lanes, the lane count must be a multiple
		 * of 4 so that consecutive CSI bricks can be ganged up for
		 * streaming.
		 */
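		/*
		 * The check below therefore rejects a zero or
		 * non-power-of-two lane count, and ganged (more than 4 lane)
		 * configurations that do not start on an even port.
		 */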
		if (!lanes || ((lanes & (lanes - 1)) != 0) ||
		    (lanes > CSI_LANES_PER_BRICK && ((portno & 1) != 0))) {
			dev_err(csi->dev, "invalid data-lanes %d for %pOF\n",
				lanes, channel);
			ret = -EINVAL;
			goto err_node_put;
		}

		num_pads = of_graph_get_endpoint_count(channel);
		if (num_pads == TEGRA_CSI_PADS_NUM) {
			ret = tegra_csi_channel_alloc(csi, channel, portno,
						      lanes, num_pads);
			if (ret < 0)
				goto err_node_put;
		}
	}

	return 0;

err_node_put:
	of_node_put(channel);
	return ret;
}

static int tegra_csi_channel_init(struct tegra_csi_channel *chan)
{
	struct tegra_csi *csi = chan->csi;
	struct v4l2_subdev *subdev;
	int ret;

	/* initialize the default format */
	chan->format.code = MEDIA_BUS_FMT_SRGGB10_1X10;
	chan->format.field = V4L2_FIELD_NONE;
	chan->format.colorspace = V4L2_COLORSPACE_SRGB;
	chan->format.width = TEGRA_DEF_WIDTH;
	chan->format.height = TEGRA_DEF_HEIGHT;
	csi_chan_update_blank_intervals(chan, chan->format.code,
					chan->format.width,
					chan->format.height);
	/* initialize V4L2 subdevice and media entity */
	subdev = &chan->subdev;
	v4l2_subdev_init(subdev, &tegra_csi_ops);
	subdev->dev = csi->dev;
	if (IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
		snprintf(subdev->name, sizeof(subdev->name), "%s-%d", "tpg",
			 chan->csi_port_nums[0]);
	else
		snprintf(subdev->name, sizeof(subdev->name), "%s",
			 kbasename(chan->of_node->full_name));

	v4l2_set_subdevdata(subdev, chan);
	subdev->fwnode = of_fwnode_handle(chan->of_node);
	subdev->entity.function = MEDIA_ENT_F_VID_IF_BRIDGE;

	/* initialize media entity pads */
	ret = media_entity_pads_init(&subdev->entity, chan->numpads,
				     chan->pads);
	if (ret < 0) {
		dev_err(csi->dev,
			"failed to initialize media entity: %d\n", ret);
		subdev->dev = NULL;
		return ret;
	}

	if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG)) {
		ret = v4l2_async_register_subdev(subdev);
		if (ret < 0) {
			dev_err(csi->dev,
				"failed to register subdev: %d\n", ret);
			return ret;
		}
	}

	return 0;
}

void tegra_csi_error_recover(struct v4l2_subdev *sd)
{
	struct tegra_csi_channel *csi_chan = to_csi_chan(sd);
	struct tegra_csi *csi = csi_chan->csi;

	/* stop streaming during error recovery */
	csi->ops->csi_stop_streaming(csi_chan);
	csi->ops->csi_err_recover(csi_chan);
	csi->ops->csi_start_streaming(csi_chan);
}

static int tegra_csi_channels_init(struct tegra_csi *csi)
{
	struct tegra_csi_channel *chan;
	int ret;

	list_for_each_entry(chan, &csi->csi_chans, list) {
		ret = tegra_csi_channel_init(chan);
		if (ret) {
			dev_err(csi->dev,
				"failed to initialize channel-%d: %d\n",
				chan->csi_port_nums[0], ret);
			return ret;
		}
	}

	return 0;
}

static void tegra_csi_channels_cleanup(struct tegra_csi *csi)
{
	struct v4l2_subdev *subdev;
	struct tegra_csi_channel *chan, *tmp;

	list_for_each_entry_safe(chan, tmp, &csi->csi_chans, list) {
		if (chan->mipi)
			tegra_mipi_free(chan->mipi);

		subdev = &chan->subdev;
		if (subdev->dev) {
			if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
				v4l2_async_unregister_subdev(subdev);
			media_entity_cleanup(&subdev->entity);
		}

		of_node_put(chan->of_node);
		list_del(&chan->list);
		kfree(chan);
	}
}

static int __maybe_unused csi_runtime_suspend(struct device *dev)
{
	struct tegra_csi *csi = dev_get_drvdata(dev);

	clk_bulk_disable_unprepare(csi->soc->num_clks, csi->clks);

	return 0;
}

static int __maybe_unused csi_runtime_resume(struct device *dev)
{
	struct tegra_csi *csi = dev_get_drvdata(dev);
	int ret;

	ret = clk_bulk_prepare_enable(csi->soc->num_clks, csi->clks);
	if (ret < 0) {
		dev_err(csi->dev, "failed to enable clocks: %d\n", ret);
		return ret;
	}

	return 0;
}

static int tegra_csi_init(struct host1x_client *client)
{
	struct tegra_csi *csi = host1x_client_to_csi(client);
	struct tegra_video_device *vid = dev_get_drvdata(client->host);
	int ret;

	INIT_LIST_HEAD(&csi->csi_chans);

	if (IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
		ret = tegra_csi_tpg_channels_alloc(csi);
	else
		ret = tegra_csi_channels_alloc(csi);
	if (ret < 0) {
		dev_err(csi->dev,
			"failed to allocate channels: %d\n", ret);
		goto cleanup;
	}

	ret = tegra_csi_channels_init(csi);
	if (ret < 0)
		goto cleanup;

	vid->csi = csi;

	return 0;

cleanup:
	tegra_csi_channels_cleanup(csi);
	return ret;
}

static int tegra_csi_exit(struct host1x_client *client)
{
	struct tegra_csi *csi = host1x_client_to_csi(client);

	tegra_csi_channels_cleanup(csi);

	return 0;
}

static const struct host1x_client_ops csi_client_ops = {
	.init = tegra_csi_init,
	.exit = tegra_csi_exit,
};

static int tegra_csi_probe(struct platform_device *pdev)
{
	struct tegra_csi *csi;
	unsigned int i;
	int ret;

	csi = devm_kzalloc(&pdev->dev, sizeof(*csi), GFP_KERNEL);
	if (!csi)
		return -ENOMEM;

	csi->iomem = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(csi->iomem))
		return PTR_ERR(csi->iomem);

	csi->soc = of_device_get_match_data(&pdev->dev);

	csi->clks = devm_kcalloc(&pdev->dev, csi->soc->num_clks,
				 sizeof(*csi->clks), GFP_KERNEL);
	if (!csi->clks)
		return -ENOMEM;

	for (i = 0; i < csi->soc->num_clks; i++)
		csi->clks[i].id = csi->soc->clk_names[i];

	ret = devm_clk_bulk_get(&pdev->dev, csi->soc->num_clks, csi->clks);
	if (ret) {
		dev_err(&pdev->dev, "failed to get the clocks: %d\n", ret);
		return ret;
	}

	if (!pdev->dev.pm_domain) {
		ret = -ENOENT;
		dev_warn(&pdev->dev, "PM domain is not attached: %d\n", ret);
		return ret;
	}

	csi->dev = &pdev->dev;
	csi->ops = csi->soc->ops;
	platform_set_drvdata(pdev, csi);
	pm_runtime_enable(&pdev->dev);

	/* initialize host1x interface */
	INIT_LIST_HEAD(&csi->client.list);
	csi->client.ops = &csi_client_ops;
	csi->client.dev = &pdev->dev;

	ret = host1x_client_register(&csi->client);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"failed to register host1x client: %d\n", ret);
		goto rpm_disable;
	}

	return 0;

rpm_disable:
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static void tegra_csi_remove(struct platform_device *pdev)
{
	struct tegra_csi *csi = platform_get_drvdata(pdev);

	host1x_client_unregister(&csi->client);

	pm_runtime_disable(&pdev->dev);
}

#if defined(CONFIG_ARCH_TEGRA_210_SOC)
extern const struct tegra_csi_soc tegra210_csi_soc;
#endif

static const struct of_device_id tegra_csi_of_id_table[] = {
#if defined(CONFIG_ARCH_TEGRA_210_SOC)
	{ .compatible = "nvidia,tegra210-csi", .data = &tegra210_csi_soc },
#endif
	{ }
};
MODULE_DEVICE_TABLE(of, tegra_csi_of_id_table);

static const struct dev_pm_ops tegra_csi_pm_ops = {
	SET_RUNTIME_PM_OPS(csi_runtime_suspend, csi_runtime_resume, NULL)
};

struct platform_driver tegra_csi_driver = {
	.driver = {
		.name = "tegra-csi",
		.of_match_table = tegra_csi_of_id_table,
		.pm = &tegra_csi_pm_ops,
	},
	.probe = tegra_csi_probe,
	.remove_new = tegra_csi_remove,
};