1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * R-Car Display Unit CRTCs
4 *
5 * Copyright (C) 2013-2015 Renesas Electronics Corporation
6 *
7 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
8 */
9
10 #include <linux/clk.h>
11 #include <linux/mutex.h>
12 #include <linux/platform_device.h>
13
14 #include <drm/drm_atomic.h>
15 #include <drm/drm_atomic_helper.h>
16 #include <drm/drm_bridge.h>
17 #include <drm/drm_crtc.h>
18 #include <drm/drm_device.h>
19 #include <drm/drm_gem_dma_helper.h>
20 #include <drm/drm_print.h>
21 #include <drm/drm_vblank.h>
22
23 #include "rcar_cmm.h"
24 #include "rcar_du_crtc.h"
25 #include "rcar_du_drv.h"
26 #include "rcar_du_encoder.h"
27 #include "rcar_du_kms.h"
28 #include "rcar_du_plane.h"
29 #include "rcar_du_regs.h"
30 #include "rcar_du_vsp.h"
31 #include "rcar_lvds.h"
32 #include "rcar_mipi_dsi.h"
33
/* Read a CRTC register, offset by the channel's MMIO base address. */
static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg)
{
	return rcar_du_read(rcrtc->dev, rcrtc->mmio_offset + reg);
}
40
/* Write a CRTC register, offset by the channel's MMIO base address. */
static void rcar_du_crtc_write(struct rcar_du_crtc *rcrtc, u32 reg, u32 data)
{
	rcar_du_write(rcrtc->dev, rcrtc->mmio_offset + reg, data);
}
47
/* Read-modify-write a CRTC register, clearing the given bits. */
static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
{
	struct rcar_du_device *rcdu = rcrtc->dev;
	u32 offset = rcrtc->mmio_offset + reg;

	rcar_du_write(rcdu, offset, rcar_du_read(rcdu, offset) & ~clr);
}
55
/* Read-modify-write a CRTC register, setting the given bits. */
static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
{
	struct rcar_du_device *rcdu = rcrtc->dev;
	u32 offset = rcrtc->mmio_offset + reg;

	rcar_du_write(rcdu, offset, rcar_du_read(rcdu, offset) | set);
}
63
/*
 * Update the cached DSYSR value (clear @clr, then set @set) and write the
 * result to the hardware register, keeping cache and hardware in sync.
 */
void rcar_du_crtc_dsysr_clr_set(struct rcar_du_crtc *rcrtc, u32 clr, u32 set)
{
	u32 dsysr = (rcrtc->dsysr & ~clr) | set;

	rcrtc->dsysr = dsysr;
	rcar_du_write(rcrtc->dev, rcrtc->mmio_offset + DSYSR, dsysr);
}
71
72 /* -----------------------------------------------------------------------------
73 * Hardware Setup
74 */
75
/* DPLL divider values computed by rcar_du_dpll_divider(). */
struct dpll_info {
	unsigned int output;	/* achieved output clock rate */
	unsigned int fdpll;	/* output divider (FDPLL = fdpll + 1) */
	unsigned int n;		/* feedback divider (N = n + 1) */
	unsigned int m;		/* input divider (M = m + 1) */
};
82
/*
 * rcar_du_dpll_divider - Find DPLL divider values closest to a target rate
 * @rcrtc: the CRTC (used for debug output only)
 * @dpll: filled with the best divider values and the achieved output rate
 * @input: DPLL input clock rate
 * @target: desired output clock rate
 *
 * Exhaustively search the M, N and FDPLL divider ranges for the combination
 * whose output rate is closest to @target, stopping early on an exact match.
 */
static void rcar_du_dpll_divider(struct rcar_du_crtc *rcrtc,
				 struct dpll_info *dpll,
				 unsigned long input,
				 unsigned long target)
{
	unsigned long best_diff = (unsigned long)-1;
	unsigned long diff;
	unsigned int fdpll;
	unsigned int m;
	unsigned int n;

	/*
	 *   fin                                 fvco        fout       fclkout
	 * in --> [1/M] --> |PD| -> [LPF] -> [VCO] -> [1/P] -+-> [1/FDPLL] -> out
	 *              +-> |  |                             |
	 *              |                                    |
	 *              +---------------- [1/N] <------------+
	 *
	 *	fclkout = fvco / P / FDPLL -- (1)
	 *
	 * fin/M = fvco/P/N
	 *
	 *	fvco = fin * P * N / M -- (2)
	 *
	 * (1) + (2) indicates
	 *
	 *	fclkout = fin * N / M / FDPLL
	 *
	 * NOTES
	 *	N     : (n + 1)
	 *	M     : (m + 1)
	 *	FDPLL : (fdpll + 1)
	 *	P     : 2
	 *	2kHz < fvco < 4096MHz
	 *
	 * To minimize the jitter,
	 * N : as large as possible
	 * M : as small as possible
	 */
	for (m = 0; m < 4; m++) {
		/* Iterate N downwards so larger values win ties (less jitter). */
		for (n = 119; n > 38; n--) {
			/*
			 * This code only runs on 64-bit architectures, the
			 * unsigned long type can thus be used for 64-bit
			 * computation. It will still compile without any
			 * warning on 32-bit architectures.
			 *
			 * To optimize calculations, use fout instead of fvco
			 * to verify the VCO frequency constraint.
			 */
			unsigned long fout = input * (n + 1) / (m + 1);

			if (fout < 1000 || fout > 2048 * 1000 * 1000U)
				continue;

			for (fdpll = 1; fdpll < 32; fdpll++) {
				unsigned long output;

				output = fout / (fdpll + 1);
				if (output >= 400 * 1000 * 1000)
					continue;

				diff = abs((long)output - (long)target);
				if (best_diff > diff) {
					best_diff = diff;
					dpll->n = n;
					dpll->m = m;
					dpll->fdpll = fdpll;
					dpll->output = output;
				}

				/* An exact match can't be improved upon. */
				if (diff == 0)
					goto done;
			}
		}
	}

done:
	dev_dbg(rcrtc->dev->dev,
		"output:%u, fdpll:%u, n:%u, m:%u, diff:%lu\n",
		dpll->output, dpll->fdpll, dpll->n, dpll->m, best_diff);
}
165
/* Best clock/divider candidate found so far by rcar_du_escr_divider(). */
struct du_clk_params {
	struct clk *clk;	/* input clock to use */
	unsigned long rate;	/* rate to set on @clk */
	unsigned long diff;	/* distance from the target frequency */
	u32 escr;		/* ESCR clock-select and divider value */
};
172
/*
 * Evaluate @clk as a dot clock source for @target and record it in @params
 * if it achieves a frequency closer to the target than the current best.
 */
static void rcar_du_escr_divider(struct clk *clk, unsigned long target,
				 u32 escr, struct du_clk_params *params)
{
	unsigned long achievable;
	unsigned long delta;
	u32 div;

	/* An exact match found previously can't be beaten, stop early. */
	if (!params->diff)
		return;

	/*
	 * Ask the clock framework for the rate closest to the target, then
	 * pick the internal divisor (1..64, programmed as div + 1) that
	 * minimizes the resulting error.
	 */
	achievable = clk_round_rate(clk, target);
	div = clamp(DIV_ROUND_CLOSEST(achievable, target), 1UL, 64UL) - 1;
	delta = abs(achievable / (div + 1) - target);

	/* Record this candidate only if it improves on the best so far. */
	if (delta < params->diff) {
		params->clk = clk;
		params->rate = achievable;
		params->diff = delta;
		params->escr = escr | div;
	}
}
206
/*
 * Configure the dot clock source/dividers and program the display timing
 * registers from the CRTC's adjusted mode.
 */
static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
{
	const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode;
	struct rcar_du_device *rcdu = rcrtc->dev;
	unsigned long mode_clock = mode->clock * 1000;
	unsigned int hdse_offset;
	u32 dsmr;
	u32 escr;

	if (rcdu->info->dpll_mask & (1 << rcrtc->index)) {
		unsigned long target = mode_clock;
		struct dpll_info dpll = { 0 };
		unsigned long extclk;
		u32 dpllcr;
		u32 div = 0;

		/*
		 * DU channels that have a display PLL can't use the internal
		 * system clock, and have no internal clock divider.
		 */
		extclk = clk_get_rate(rcrtc->extclock);
		rcar_du_dpll_divider(rcrtc, &dpll, extclk, target);

		dpllcr = DPLLCR_CODE | DPLLCR_CLKE
		       | DPLLCR_FDPLL(dpll.fdpll)
		       | DPLLCR_N(dpll.n) | DPLLCR_M(dpll.m)
		       | DPLLCR_STBY;

		/* The PLL clock/input select bits depend on the channel. */
		if (rcrtc->index == 1)
			dpllcr |= DPLLCR_PLCS1
			       | DPLLCR_INCS_DOTCLKIN1;
		else
			dpllcr |= DPLLCR_PLCS0
			       | DPLLCR_INCS_DOTCLKIN0;

		rcar_du_group_write(rcrtc->group, DPLLCR, dpllcr);

		escr = ESCR_DCLKSEL_DCLKIN | div;
	} else if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) ||
		   rcdu->info->dsi_clk_mask & BIT(rcrtc->index)) {
		/*
		 * Use the external LVDS or DSI PLL output as the dot clock when
		 * outputting to the LVDS or DSI encoder on an SoC that supports
		 * this clock routing option. We use the clock directly in that
		 * case, without any additional divider.
		 */
		escr = ESCR_DCLKSEL_DCLKIN;
	} else {
		struct du_clk_params params = { .diff = (unsigned long)-1 };

		/* Try both the internal and the external clock sources. */
		rcar_du_escr_divider(rcrtc->clock, mode_clock,
				     ESCR_DCLKSEL_CLKS, &params);
		if (rcrtc->extclock)
			rcar_du_escr_divider(rcrtc->extclock, mode_clock,
					     ESCR_DCLKSEL_DCLKIN, &params);

		dev_dbg(rcrtc->dev->dev, "mode clock %lu %s rate %lu\n",
			mode_clock, params.clk == rcrtc->clock ? "cpg" : "ext",
			params.rate);

		clk_set_rate(params.clk, params.rate);
		escr = params.escr;
	}

	/*
	 * The ESCR register only exists in DU channels that can output to an
	 * LVDS or DPAD, and the OTAR register in DU channels that can output
	 * to a DPAD.
	 */
	if ((rcdu->info->routes[RCAR_DU_OUTPUT_DPAD0].possible_crtcs |
	     rcdu->info->routes[RCAR_DU_OUTPUT_DPAD1].possible_crtcs |
	     rcdu->info->routes[RCAR_DU_OUTPUT_LVDS0].possible_crtcs |
	     rcdu->info->routes[RCAR_DU_OUTPUT_LVDS1].possible_crtcs) &
	    BIT(rcrtc->index)) {
		dev_dbg(rcrtc->dev->dev, "%s: ESCR 0x%08x\n", __func__, escr);

		rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? ESCR13 : ESCR02, escr);
	}

	if ((rcdu->info->routes[RCAR_DU_OUTPUT_DPAD0].possible_crtcs |
	     rcdu->info->routes[RCAR_DU_OUTPUT_DPAD1].possible_crtcs) &
	    BIT(rcrtc->index))
		rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? OTAR13 : OTAR02, 0);

	/* Signal polarities */
	dsmr = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0)
	     | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0)
	     | ((mode->flags & DRM_MODE_FLAG_INTERLACE) ? DSMR_ODEV : 0)
	     | DSMR_DIPM_DISP | DSMR_CSPM;
	rcar_du_crtc_write(rcrtc, DSMR, dsmr);

	/*
	 * When the CMM is enabled, an additional offset of 25 pixels must be
	 * subtracted from the HDS (horizontal display start) and HDE
	 * (horizontal display end) registers.
	 */
	hdse_offset = 19;
	if (rcrtc->group->cmms_mask & BIT(rcrtc->index % 2))
		hdse_offset += 25;

	/* Display timings */
	rcar_du_crtc_write(rcrtc, HDSR, mode->htotal - mode->hsync_start -
					hdse_offset);
	rcar_du_crtc_write(rcrtc, HDER, mode->htotal - mode->hsync_start +
					mode->hdisplay - hdse_offset);
	rcar_du_crtc_write(rcrtc, HSWR, mode->hsync_end -
					mode->hsync_start - 1);
	rcar_du_crtc_write(rcrtc, HCR,  mode->htotal - 1);

	rcar_du_crtc_write(rcrtc, VDSR, mode->crtc_vtotal -
					mode->crtc_vsync_end - 2);
	rcar_du_crtc_write(rcrtc, VDER, mode->crtc_vtotal -
					mode->crtc_vsync_end +
					mode->crtc_vdisplay - 2);
	rcar_du_crtc_write(rcrtc, VSPR, mode->crtc_vtotal -
					mode->crtc_vsync_end +
					mode->crtc_vsync_start - 1);
	rcar_du_crtc_write(rcrtc, VCR,  mode->crtc_vtotal - 1);

	rcar_du_crtc_write(rcrtc, DESR,  mode->htotal - mode->hsync_start - 1);
	rcar_du_crtc_write(rcrtc, DEWR,  mode->hdisplay);
}
329
plane_zpos(struct rcar_du_plane * plane)330 static unsigned int plane_zpos(struct rcar_du_plane *plane)
331 {
332 return plane->plane.state->normalized_zpos;
333 }
334
/* Return the format stored in the plane's driver-private state. */
static const struct rcar_du_format_info *
plane_format(struct rcar_du_plane *plane)
{
	return to_rcar_plane_state(plane->plane.state)->format;
}
340
/*
 * Recompute the hardware plane priority (DSPR), display-timing association
 * (DPTSR) and plane usage for this CRTC from the visible planes' state.
 */
static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc)
{
	struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES];
	struct rcar_du_device *rcdu = rcrtc->dev;
	unsigned int num_planes = 0;
	unsigned int dptsr_planes;
	unsigned int hwplanes = 0;
	unsigned int prio = 0;
	unsigned int i;
	u32 dspr = 0;

	/* Collect the planes assigned to this CRTC, sorted by zpos. */
	for (i = 0; i < rcrtc->group->num_planes; ++i) {
		struct rcar_du_plane *plane = &rcrtc->group->planes[i];
		unsigned int j;

		if (plane->plane.state->crtc != &rcrtc->crtc ||
		    !plane->plane.state->visible)
			continue;

		/* Insert the plane in the sorted planes array. */
		for (j = num_planes++; j > 0; --j) {
			if (plane_zpos(planes[j-1]) <= plane_zpos(plane))
				break;
			planes[j] = planes[j-1];
		}

		planes[j] = plane;
		/* Each (sub)plane takes a 4-bit slot in the DSPR register. */
		prio += plane_format(plane)->planes * 4;
	}

	/* Pack the hardware plane indices into DSPR in priority order. */
	for (i = 0; i < num_planes; ++i) {
		struct rcar_du_plane *plane = planes[i];
		struct drm_plane_state *state = plane->plane.state;
		unsigned int index = to_rcar_plane_state(state)->hwindex;

		prio -= 4;
		dspr |= (index + 1) << prio;
		hwplanes |= 1 << index;

		/* Two-plane formats also use the next hardware plane. */
		if (plane_format(plane)->planes == 2) {
			index = (index + 1) % 8;

			prio -= 4;
			dspr |= (index + 1) << prio;
			hwplanes |= 1 << index;
		}
	}

	/* If VSP+DU integration is enabled the plane assignment is fixed. */
	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE)) {
		if (rcdu->info->gen < 3) {
			dspr = (rcrtc->index % 2) + 1;
			hwplanes = 1 << (rcrtc->index % 2);
		} else {
			dspr = (rcrtc->index % 2) ? 3 : 1;
			hwplanes = 1 << ((rcrtc->index % 2) ? 2 : 0);
		}
	}

	/*
	 * Update the planes to display timing and dot clock generator
	 * associations.
	 *
	 * Updating the DPTSR register requires restarting the CRTC group,
	 * resulting in visible flicker. To mitigate the issue only update the
	 * association if needed by enabled planes. Planes being disabled will
	 * keep their current association.
	 */
	mutex_lock(&rcrtc->group->lock);

	dptsr_planes = rcrtc->index % 2 ? rcrtc->group->dptsr_planes | hwplanes
		     : rcrtc->group->dptsr_planes & ~hwplanes;

	if (dptsr_planes != rcrtc->group->dptsr_planes) {
		rcar_du_group_write(rcrtc->group, DPTSR,
				    (dptsr_planes << 16) | dptsr_planes);
		rcrtc->group->dptsr_planes = dptsr_planes;

		if (rcrtc->group->used_crtcs)
			rcar_du_group_restart(rcrtc->group);
	}

	/* Restart the group if plane sources have changed. */
	if (rcrtc->group->need_restart)
		rcar_du_group_restart(rcrtc->group);

	mutex_unlock(&rcrtc->group->lock);

	rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR,
			    dspr);
}
432
433 /* -----------------------------------------------------------------------------
434 * Page Flip
435 */
436
/*
 * Complete a pending page flip: deliver the queued vblank event (if any),
 * wake waiters in rcar_du_crtc_wait_page_flip(), and release the vblank
 * reference taken when the flip was queued in .atomic_flush().
 */
void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
{
	struct drm_pending_vblank_event *event;
	struct drm_device *dev = rcrtc->crtc.dev;
	unsigned long flags;

	/* Atomically claim the pending event under the event lock. */
	spin_lock_irqsave(&dev->event_lock, flags);
	event = rcrtc->event;
	rcrtc->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (event == NULL)
		return;

	spin_lock_irqsave(&dev->event_lock, flags);
	drm_crtc_send_vblank_event(&rcrtc->crtc, event);
	wake_up(&rcrtc->flip_wait);
	spin_unlock_irqrestore(&dev->event_lock, flags);

	/* Balances the drm_crtc_vblank_get() in .atomic_flush(). */
	drm_crtc_vblank_put(&rcrtc->crtc);
}
458
rcar_du_crtc_page_flip_pending(struct rcar_du_crtc * rcrtc)459 static bool rcar_du_crtc_page_flip_pending(struct rcar_du_crtc *rcrtc)
460 {
461 struct drm_device *dev = rcrtc->crtc.dev;
462 unsigned long flags;
463 bool pending;
464
465 spin_lock_irqsave(&dev->event_lock, flags);
466 pending = rcrtc->event != NULL;
467 spin_unlock_irqrestore(&dev->event_lock, flags);
468
469 return pending;
470 }
471
rcar_du_crtc_wait_page_flip(struct rcar_du_crtc * rcrtc)472 static void rcar_du_crtc_wait_page_flip(struct rcar_du_crtc *rcrtc)
473 {
474 struct rcar_du_device *rcdu = rcrtc->dev;
475
476 if (wait_event_timeout(rcrtc->flip_wait,
477 !rcar_du_crtc_page_flip_pending(rcrtc),
478 msecs_to_jiffies(50)))
479 return;
480
481 dev_warn(rcdu->dev, "page flip timeout\n");
482
483 rcar_du_crtc_finish_page_flip(rcrtc);
484 }
485
486 /* -----------------------------------------------------------------------------
487 * Color Management Module (CMM)
488 */
489
rcar_du_cmm_check(struct drm_crtc * crtc,struct drm_crtc_state * state)490 static int rcar_du_cmm_check(struct drm_crtc *crtc,
491 struct drm_crtc_state *state)
492 {
493 struct drm_property_blob *drm_lut = state->gamma_lut;
494 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
495 struct device *dev = rcrtc->dev->dev;
496
497 if (!drm_lut)
498 return 0;
499
500 /* We only accept fully populated LUT tables. */
501 if (drm_color_lut_size(drm_lut) != CM2_LUT_SIZE) {
502 dev_err(dev, "invalid gamma lut size: %zu bytes\n",
503 drm_lut->length);
504 return -EINVAL;
505 }
506
507 return 0;
508 }
509
rcar_du_cmm_setup(struct drm_crtc * crtc)510 static void rcar_du_cmm_setup(struct drm_crtc *crtc)
511 {
512 struct drm_property_blob *drm_lut = crtc->state->gamma_lut;
513 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
514 struct rcar_cmm_config cmm_config = {};
515
516 if (!rcrtc->cmm)
517 return;
518
519 if (drm_lut)
520 cmm_config.lut.table = (struct drm_color_lut *)drm_lut->data;
521
522 rcar_cmm_setup(rcrtc->cmm, &cmm_config);
523 }
524
525 /* -----------------------------------------------------------------------------
526 * Start/Stop and Suspend/Resume
527 */
528
/*
 * One-time hardware setup for the CRTC: blank the output, program timings
 * and routing, disable all planes, and enable the VSP and vblank reporting.
 */
static void rcar_du_crtc_setup(struct rcar_du_crtc *rcrtc)
{
	/* Set display off and background to black */
	rcar_du_crtc_write(rcrtc, DOOR, DOOR_RGB(0, 0, 0));
	rcar_du_crtc_write(rcrtc, BPOR, BPOR_RGB(0, 0, 0));

	/* Configure display timings and output routing */
	rcar_du_crtc_set_display_timing(rcrtc);
	rcar_du_group_set_routing(rcrtc->group);

	/* Start with all planes disabled. */
	rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);

	/* Enable the VSP compositor. */
	if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
		rcar_du_vsp_enable(rcrtc);

	/* Turn vertical blanking interrupt reporting on. */
	drm_crtc_vblank_on(&rcrtc->crtc);
}
549
rcar_du_crtc_get(struct rcar_du_crtc * rcrtc)550 static int rcar_du_crtc_get(struct rcar_du_crtc *rcrtc)
551 {
552 int ret;
553
554 /*
555 * Guard against double-get, as the function is called from both the
556 * .atomic_enable() and .atomic_begin() handlers.
557 */
558 if (rcrtc->initialized)
559 return 0;
560
561 ret = clk_prepare_enable(rcrtc->clock);
562 if (ret < 0)
563 return ret;
564
565 ret = clk_prepare_enable(rcrtc->extclock);
566 if (ret < 0)
567 goto error_clock;
568
569 ret = rcar_du_group_get(rcrtc->group);
570 if (ret < 0)
571 goto error_group;
572
573 rcar_du_crtc_setup(rcrtc);
574 rcrtc->initialized = true;
575
576 return 0;
577
578 error_group:
579 clk_disable_unprepare(rcrtc->extclock);
580 error_clock:
581 clk_disable_unprepare(rcrtc->clock);
582 return ret;
583 }
584
/* Release the resources acquired by rcar_du_crtc_get(), in reverse order. */
static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc)
{
	rcar_du_group_put(rcrtc->group);

	clk_disable_unprepare(rcrtc->extclock);
	clk_disable_unprepare(rcrtc->clock);

	/* Allow a subsequent rcar_du_crtc_get() to reinitialize the CRTC. */
	rcrtc->initialized = false;
}
594
/* Start display operation on the CRTC by entering master sync mode. */
static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
{
	bool interlaced;

	/*
	 * Select master sync mode. This enables display operation in master
	 * sync mode (with the HSYNC and VSYNC signals configured as outputs and
	 * actively driven).
	 */
	interlaced = rcrtc->crtc.mode.flags & DRM_MODE_FLAG_INTERLACE;
	rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_TVM_MASK | DSYSR_SCM_MASK,
				   (interlaced ? DSYSR_SCM_INT_VIDEO : 0) |
				   DSYSR_TVM_MASTER);

	rcar_du_group_start_stop(rcrtc->group, true);
}
611
/*
 * Disable all planes on the CRTC and wait for the change to take effect
 * at the next vblank(s).
 */
static void rcar_du_crtc_disable_planes(struct rcar_du_crtc *rcrtc)
{
	struct rcar_du_device *rcdu = rcrtc->dev;
	struct drm_crtc *crtc = &rcrtc->crtc;
	u32 status;

	/* Make sure vblank interrupts are enabled. */
	drm_crtc_vblank_get(crtc);

	/*
	 * Disable planes and calculate how many vertical blanking interrupts we
	 * have to wait for. If a vertical blanking interrupt has been triggered
	 * but not processed yet, we don't know whether it occurred before or
	 * after the planes got disabled. We thus have to wait for two vblank
	 * interrupts in that case.
	 */
	spin_lock_irq(&rcrtc->vblank_lock);
	rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
	status = rcar_du_crtc_read(rcrtc, DSSR);
	rcrtc->vblank_count = status & DSSR_VBK ? 2 : 1;
	spin_unlock_irq(&rcrtc->vblank_lock);

	/* vblank_count is presumably decremented by the vblank interrupt
	 * handler (not visible in this file), which wakes vblank_wait.
	 */
	if (!wait_event_timeout(rcrtc->vblank_wait, rcrtc->vblank_count == 0,
				msecs_to_jiffies(100)))
		dev_warn(rcdu->dev, "vertical blanking timeout\n");

	drm_crtc_vblank_put(crtc);
}
640
/*
 * Stop the CRTC: disable planes, complete pending page flips, turn vblank
 * reporting off, disable the VSP/CMM, and switch the hardware out of master
 * sync mode.
 */
static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
{
	struct drm_crtc *crtc = &rcrtc->crtc;

	/*
	 * Disable all planes and wait for the change to take effect. This is
	 * required as the plane enable registers are updated on vblank, and no
	 * vblank will occur once the CRTC is stopped. Disabling planes when
	 * starting the CRTC thus wouldn't be enough as it would start scanning
	 * out immediately from old frame buffers until the next vblank.
	 *
	 * This increases the CRTC stop delay, especially when multiple CRTCs
	 * are stopped in one operation as we now wait for one vblank per CRTC.
	 * Whether this can be improved needs to be researched.
	 */
	rcar_du_crtc_disable_planes(rcrtc);

	/*
	 * Disable vertical blanking interrupt reporting. We first need to wait
	 * for page flip completion before stopping the CRTC as userspace
	 * expects page flips to eventually complete.
	 */
	rcar_du_crtc_wait_page_flip(rcrtc);
	drm_crtc_vblank_off(crtc);

	/* Disable the VSP compositor. */
	if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
		rcar_du_vsp_disable(rcrtc);

	if (rcrtc->cmm)
		rcar_cmm_disable(rcrtc->cmm);

	/*
	 * Select switch sync mode. This stops display operation and configures
	 * the HSYNC and VSYNC signals as inputs.
	 *
	 * TODO: Find another way to stop the display for DUs that don't support
	 * TVM sync.
	 */
	if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_TVM_SYNC))
		rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_TVM_MASK,
					   DSYSR_TVM_SWITCH);

	rcar_du_group_start_stop(rcrtc->group, false);
}
686
687 /* -----------------------------------------------------------------------------
688 * CRTC Functions
689 */
690
rcar_du_crtc_atomic_check(struct drm_crtc * crtc,struct drm_atomic_state * state)691 static int rcar_du_crtc_atomic_check(struct drm_crtc *crtc,
692 struct drm_atomic_state *state)
693 {
694 struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
695 crtc);
696 struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc_state);
697 struct drm_encoder *encoder;
698 int ret;
699
700 ret = rcar_du_cmm_check(crtc, crtc_state);
701 if (ret)
702 return ret;
703
704 /* Store the routes from the CRTC output to the DU outputs. */
705 rstate->outputs = 0;
706
707 drm_for_each_encoder_mask(encoder, crtc->dev,
708 crtc_state->encoder_mask) {
709 struct rcar_du_encoder *renc;
710
711 /* Skip the writeback encoder. */
712 if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
713 continue;
714
715 renc = to_rcar_encoder(encoder);
716 rstate->outputs |= BIT(renc->output);
717 }
718
719 return 0;
720 }
721
/*
 * .atomic_enable() handler: power up the CRTC, enable any external dot
 * clock sources (LVDS/DSI PLLs), start scanout, and program the CMM.
 */
static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
				       struct drm_atomic_state *state)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc->state);
	struct rcar_du_device *rcdu = rcrtc->dev;

	if (rcrtc->cmm)
		rcar_cmm_enable(rcrtc->cmm);
	rcar_du_crtc_get(rcrtc);

	/*
	 * On D3/E3 the dot clock is provided by the LVDS encoder attached to
	 * the DU channel. We need to enable its clock output explicitly before
	 * starting the CRTC, as the bridge hasn't been enabled by the atomic
	 * helpers yet.
	 */
	if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index)) {
		bool dot_clk_only = rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0);
		struct drm_bridge *bridge = rcdu->lvds[rcrtc->index];
		const struct drm_display_mode *mode =
			&crtc->state->adjusted_mode;

		rcar_lvds_pclk_enable(bridge, mode->clock * 1000, dot_clk_only);
	}

	/*
	 * Similarly to LVDS, on V3U the dot clock is provided by the DSI
	 * encoder, and we need to enable the DSI clocks before enabling the CRTC.
	 */
	if ((rcdu->info->dsi_clk_mask & BIT(rcrtc->index)) &&
	    (rstate->outputs &
	     (BIT(RCAR_DU_OUTPUT_DSI0) | BIT(RCAR_DU_OUTPUT_DSI1)))) {
		struct drm_bridge *bridge = rcdu->dsi[rcrtc->index];

		rcar_mipi_dsi_pclk_enable(bridge, state);
	}

	rcar_du_crtc_start(rcrtc);

	/*
	 * TODO: The chip manual indicates that CMM tables should be written
	 * after the DU channel has been activated. Investigate the impact
	 * of this restriction on the first displayed frame.
	 */
	rcar_du_cmm_setup(crtc);
}
769
/*
 * .atomic_disable() handler: stop scanout, release CRTC resources, disable
 * external dot clock sources, and complete any pending vblank event.
 */
static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
					struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
									 crtc);
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(old_state);
	struct rcar_du_device *rcdu = rcrtc->dev;

	rcar_du_crtc_stop(rcrtc);
	rcar_du_crtc_put(rcrtc);

	if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index)) {
		bool dot_clk_only = rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0);
		struct drm_bridge *bridge = rcdu->lvds[rcrtc->index];

		/*
		 * Disable the LVDS clock output, see
		 * rcar_du_crtc_atomic_enable(). When the LVDS output is used,
		 * this also disables the LVDS encoder.
		 */
		rcar_lvds_pclk_disable(bridge, dot_clk_only);
	}

	if ((rcdu->info->dsi_clk_mask & BIT(rcrtc->index)) &&
	    (rstate->outputs &
	     (BIT(RCAR_DU_OUTPUT_DSI0) | BIT(RCAR_DU_OUTPUT_DSI1)))) {
		struct drm_bridge *bridge = rcdu->dsi[rcrtc->index];

		/*
		 * Disable the DSI clock output, see
		 * rcar_du_crtc_atomic_enable().
		 */
		rcar_mipi_dsi_pclk_disable(bridge);
	}

	/* Send any event queued on the disabled CRTC immediately. */
	spin_lock_irq(&crtc->dev->event_lock);
	if (crtc->state->event) {
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
	}
	spin_unlock_irq(&crtc->dev->event_lock);
}
813
/*
 * .atomic_begin() handler: make sure the CRTC hardware is powered, update
 * the CMM on pure color-management changes, and begin the VSP update.
 */
static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
				      struct drm_atomic_state *state)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

	WARN_ON(!crtc->state->enable);

	/*
	 * If a mode set is in progress we can be called with the CRTC disabled.
	 * We thus need to first get and setup the CRTC in order to configure
	 * planes. We must *not* put the CRTC in .atomic_flush(), as it must be
	 * kept awake until the .atomic_enable() call that will follow. The get
	 * operation in .atomic_enable() will in that case be a no-op, and the
	 * CRTC will be put later in .atomic_disable().
	 *
	 * If a mode set is not in progress the CRTC is enabled, and the
	 * following get call will be a no-op. There is thus no need to balance
	 * it in .atomic_flush() either.
	 */
	rcar_du_crtc_get(rcrtc);

	/* If the active state changed, we let .atomic_enable handle CMM. */
	if (crtc->state->color_mgmt_changed && !crtc->state->active_changed)
		rcar_du_cmm_setup(crtc);

	if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
		rcar_du_vsp_atomic_begin(rcrtc);
}
842
/*
 * .atomic_flush() handler: commit the plane configuration, queue the vblank
 * event for delivery by rcar_du_crtc_finish_page_flip(), and flush the VSP.
 */
static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc,
				      struct drm_atomic_state *state)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct drm_device *dev = rcrtc->crtc.dev;
	unsigned long flags;

	rcar_du_crtc_update_planes(rcrtc);

	if (crtc->state->event) {
		/* Hold a vblank reference until the event is delivered. */
		WARN_ON(drm_crtc_vblank_get(crtc) != 0);

		spin_lock_irqsave(&dev->event_lock, flags);
		rcrtc->event = crtc->state->event;
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}

	if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
		rcar_du_vsp_atomic_flush(rcrtc);
}
864
865 static enum drm_mode_status
rcar_du_crtc_mode_valid(struct drm_crtc * crtc,const struct drm_display_mode * mode)866 rcar_du_crtc_mode_valid(struct drm_crtc *crtc,
867 const struct drm_display_mode *mode)
868 {
869 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
870 struct rcar_du_device *rcdu = rcrtc->dev;
871 bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
872 unsigned int min_sync_porch;
873 unsigned int vbp;
874
875 if (interlaced && !rcar_du_has(rcdu, RCAR_DU_FEATURE_INTERLACED))
876 return MODE_NO_INTERLACE;
877
878 /*
879 * The hardware requires a minimum combined horizontal sync and back
880 * porch of 20 pixels (when CMM isn't used) or 45 pixels (when CMM is
881 * used), and a minimum vertical back porch of 3 lines.
882 */
883 min_sync_porch = 20;
884 if (rcrtc->group->cmms_mask & BIT(rcrtc->index % 2))
885 min_sync_porch += 25;
886
887 if (mode->htotal - mode->hsync_start < min_sync_porch)
888 return MODE_HBLANK_NARROW;
889
890 vbp = (mode->vtotal - mode->vsync_end) / (interlaced ? 2 : 1);
891 if (vbp < 3)
892 return MODE_VBLANK_NARROW;
893
894 return MODE_OK;
895 }
896
/* CRTC helper operations for the atomic modesetting infrastructure. */
static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
	.atomic_check = rcar_du_crtc_atomic_check,
	.atomic_begin = rcar_du_crtc_atomic_begin,
	.atomic_flush = rcar_du_crtc_atomic_flush,
	.atomic_enable = rcar_du_crtc_atomic_enable,
	.atomic_disable = rcar_du_crtc_atomic_disable,
	.mode_valid = rcar_du_crtc_mode_valid,
};
905
rcar_du_crtc_crc_init(struct rcar_du_crtc * rcrtc)906 static void rcar_du_crtc_crc_init(struct rcar_du_crtc *rcrtc)
907 {
908 struct rcar_du_device *rcdu = rcrtc->dev;
909 const char **sources;
910 unsigned int count;
911 int i = -1;
912
913 /* CRC available only on Gen3 HW. */
914 if (rcdu->info->gen < 3)
915 return;
916
917 /* Reserve 1 for "auto" source. */
918 count = rcrtc->vsp->num_planes + 1;
919
920 sources = kmalloc_array(count, sizeof(*sources), GFP_KERNEL);
921 if (!sources)
922 return;
923
924 sources[0] = kstrdup("auto", GFP_KERNEL);
925 if (!sources[0])
926 goto error;
927
928 for (i = 0; i < rcrtc->vsp->num_planes; ++i) {
929 struct drm_plane *plane = &rcrtc->vsp->planes[i].plane;
930 char name[16];
931
932 sprintf(name, "plane%u", plane->base.id);
933 sources[i + 1] = kstrdup(name, GFP_KERNEL);
934 if (!sources[i + 1])
935 goto error;
936 }
937
938 rcrtc->sources = sources;
939 rcrtc->sources_count = count;
940 return;
941
942 error:
943 while (i >= 0) {
944 kfree(sources[i]);
945 i--;
946 }
947 kfree(sources);
948 }
949
rcar_du_crtc_crc_cleanup(struct rcar_du_crtc * rcrtc)950 static void rcar_du_crtc_crc_cleanup(struct rcar_du_crtc *rcrtc)
951 {
952 unsigned int i;
953
954 if (!rcrtc->sources)
955 return;
956
957 for (i = 0; i < rcrtc->sources_count; i++)
958 kfree(rcrtc->sources[i]);
959 kfree(rcrtc->sources);
960
961 rcrtc->sources = NULL;
962 rcrtc->sources_count = 0;
963 }
964
965 static struct drm_crtc_state *
rcar_du_crtc_atomic_duplicate_state(struct drm_crtc * crtc)966 rcar_du_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
967 {
968 struct rcar_du_crtc_state *state;
969 struct rcar_du_crtc_state *copy;
970
971 if (WARN_ON(!crtc->state))
972 return NULL;
973
974 state = to_rcar_crtc_state(crtc->state);
975 copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
976 if (copy == NULL)
977 return NULL;
978
979 __drm_atomic_helper_crtc_duplicate_state(crtc, ©->state);
980
981 return ©->state;
982 }
983
/* Destroy a driver-private CRTC state and free its container. */
static void rcar_du_crtc_atomic_destroy_state(struct drm_crtc *crtc,
					      struct drm_crtc_state *state)
{
	struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(state);
	kfree(rstate);
}
990
/* CRTC .destroy() path: free CRC sources, then tear down the CRTC. */
static void rcar_du_crtc_cleanup(struct drm_crtc *crtc)
{
	rcar_du_crtc_crc_cleanup(to_rcar_crtc(crtc));

	drm_crtc_cleanup(crtc);
}
999
rcar_du_crtc_reset(struct drm_crtc * crtc)1000 static void rcar_du_crtc_reset(struct drm_crtc *crtc)
1001 {
1002 struct rcar_du_crtc_state *state;
1003
1004 if (crtc->state) {
1005 rcar_du_crtc_atomic_destroy_state(crtc, crtc->state);
1006 crtc->state = NULL;
1007 }
1008
1009 state = kzalloc_obj(*state);
1010 if (state == NULL)
1011 return;
1012
1013 state->crc.source = VSP1_DU_CRC_NONE;
1014 state->crc.index = 0;
1015
1016 __drm_atomic_helper_crtc_reset(crtc, &state->state);
1017 }
1018
rcar_du_crtc_enable_vblank(struct drm_crtc * crtc)1019 static int rcar_du_crtc_enable_vblank(struct drm_crtc *crtc)
1020 {
1021 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
1022
1023 rcar_du_crtc_write(rcrtc, DSRCR, DSRCR_VBCL);
1024 rcar_du_crtc_set(rcrtc, DIER, DIER_VBE);
1025 rcrtc->vblank_enable = true;
1026
1027 return 0;
1028 }
1029
rcar_du_crtc_disable_vblank(struct drm_crtc * crtc)1030 static void rcar_du_crtc_disable_vblank(struct drm_crtc *crtc)
1031 {
1032 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
1033
1034 rcar_du_crtc_clr(rcrtc, DIER, DIER_VBE);
1035 rcrtc->vblank_enable = false;
1036 }
1037
/*
 * Parse a debugfs CRC source name into a vsp1_du_crc_source value.
 *
 * Return values are overloaded: 0 for a NULL source or "auto", the index of
 * the matching VSP plane (a non-negative value) for "plane%u" sources, or a
 * negative error code when the name is invalid or no plane matches. Callers
 * must treat any ret >= 0 as success and, for VSP1_DU_CRC_PLANE, as the
 * plane index.
 */
static int rcar_du_crtc_parse_crc_source(struct rcar_du_crtc *rcrtc,
					 const char *source_name,
					 enum vsp1_du_crc_source *source)
{
	unsigned int index;
	int ret;

	/*
	 * Parse the source name. Supported values are "plane%u" to compute the
	 * CRC on an input plane (%u is the plane ID), and "auto" to compute the
	 * CRC on the composer (VSP) output.
	 */

	if (!source_name) {
		/* A NULL name disables CRC computation. */
		*source = VSP1_DU_CRC_NONE;
		return 0;
	} else if (!strcmp(source_name, "auto")) {
		*source = VSP1_DU_CRC_OUTPUT;
		return 0;
	} else if (strstarts(source_name, "plane")) {
		unsigned int i;

		*source = VSP1_DU_CRC_PLANE;

		/* The suffix after "plane" is the DRM plane object ID. */
		ret = kstrtouint(source_name + strlen("plane"), 10, &index);
		if (ret < 0)
			return ret;

		/* Translate the plane ID into a VSP plane index. */
		for (i = 0; i < rcrtc->vsp->num_planes; ++i) {
			if (index == rcrtc->vsp->planes[i].plane.base.id)
				return i;
		}
	}

	/* Unknown source name, or no plane with the requested ID. */
	return -EINVAL;
}
1074
static int rcar_du_crtc_verify_crc_source(struct drm_crtc *crtc,
					  const char *source_name,
					  size_t *values_cnt)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	enum vsp1_du_crc_source source;
	int ret;

	/* Only validate the name; the parsed source itself is discarded. */
	ret = rcar_du_crtc_parse_crc_source(rcrtc, source_name, &source);
	if (ret < 0) {
		DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
		return -EINVAL;
	}

	/* The VSP reports a single CRC value per frame. */
	*values_cnt = 1;
	return 0;
}
1090
1091 static const char *const *
rcar_du_crtc_get_crc_sources(struct drm_crtc * crtc,size_t * count)1092 rcar_du_crtc_get_crc_sources(struct drm_crtc *crtc, size_t *count)
1093 {
1094 struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
1095
1096 *count = rcrtc->sources_count;
1097 return rcrtc->sources;
1098 }
1099
/*
 * Set the CRC source for the CRTC through an atomic commit.
 *
 * The CRC source is part of the CRTC atomic state, so applying it requires a
 * full atomic commit performed under a modeset acquire context, with the
 * standard deadlock backoff-and-retry dance.
 */
static int rcar_du_crtc_set_crc_source(struct drm_crtc *crtc,
				       const char *source_name)
{
	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
	struct drm_modeset_acquire_ctx ctx;
	struct drm_crtc_state *crtc_state;
	struct drm_atomic_state *state;
	enum vsp1_du_crc_source source;
	unsigned int index;
	int ret;

	ret = rcar_du_crtc_parse_crc_source(rcrtc, source_name, &source);
	if (ret < 0)
		return ret;

	/* For plane sources, a non-negative return value is the plane index. */
	index = ret;

	/* Perform an atomic commit to set the CRC source. */
	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state) {
		ret = -ENOMEM;
		goto unlock;
	}

	state->acquire_ctx = &ctx;

retry:
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	if (!IS_ERR(crtc_state)) {
		struct rcar_du_crtc_state *rcrtc_state;

		rcrtc_state = to_rcar_crtc_state(crtc_state);
		rcrtc_state->crc.source = source;
		rcrtc_state->crc.index = index;

		ret = drm_atomic_commit(state);
	} else {
		ret = PTR_ERR(crtc_state);
	}

	/*
	 * On lock contention, clear the half-built state, wait for the
	 * conflicting context via drm_modeset_backoff() and retry.
	 */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_atomic_state_put(state);

unlock:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
1156
/*
 * CRTC functions for Gen2 hardware. No CRC source hooks are provided (CRC
 * computation relies on the VSP compositor used on Gen3), so the plain
 * drm_crtc_cleanup() suffices as the destroy handler.
 */
static const struct drm_crtc_funcs crtc_funcs_gen2 = {
	.reset = rcar_du_crtc_reset,
	.destroy = drm_crtc_cleanup,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = rcar_du_crtc_atomic_duplicate_state,
	.atomic_destroy_state = rcar_du_crtc_atomic_destroy_state,
	.enable_vblank = rcar_du_crtc_enable_vblank,
	.disable_vblank = rcar_du_crtc_disable_vblank,
};
1167
/*
 * CRTC functions for Gen3 hardware. Adds the debugfs CRC source hooks and a
 * destroy handler that also frees the CRC source name table.
 */
static const struct drm_crtc_funcs crtc_funcs_gen3 = {
	.reset = rcar_du_crtc_reset,
	.destroy = rcar_du_crtc_cleanup,
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.atomic_duplicate_state = rcar_du_crtc_atomic_duplicate_state,
	.atomic_destroy_state = rcar_du_crtc_atomic_destroy_state,
	.enable_vblank = rcar_du_crtc_enable_vblank,
	.disable_vblank = rcar_du_crtc_disable_vblank,
	.set_crc_source = rcar_du_crtc_set_crc_source,
	.verify_crc_source = rcar_du_crtc_verify_crc_source,
	.get_crc_sources = rcar_du_crtc_get_crc_sources,
};
1181
1182 /* -----------------------------------------------------------------------------
1183 * Interrupt Handling
1184 */
1185
/*
 * CRTC interrupt handler: acknowledges the display status interrupts and
 * handles vblank. Returns IRQ_HANDLED only when a vblank was signalled.
 */
static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
{
	struct rcar_du_crtc *rcrtc = arg;
	struct rcar_du_device *rcdu = rcrtc->dev;
	irqreturn_t ret = IRQ_NONE;
	u32 status;

	spin_lock(&rcrtc->vblank_lock);

	/* Read and acknowledge the pending interrupt sources. */
	status = rcar_du_crtc_read(rcrtc, DSSR);
	rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);

	if (status & DSSR_VBK) {
		/*
		 * Wake up the vblank wait if the counter reaches 0. This must
		 * be protected by the vblank_lock to avoid races in
		 * rcar_du_crtc_disable_planes().
		 */
		if (rcrtc->vblank_count) {
			if (--rcrtc->vblank_count == 0)
				wake_up(&rcrtc->vblank_wait);
		}
	}

	spin_unlock(&rcrtc->vblank_lock);

	if (status & DSSR_VBK) {
		/*
		 * NOTE(review): vblank events and page-flip completion are only
		 * reported here on Gen1/Gen2; on Gen3 they presumably come
		 * through the VSP completion path — confirm in rcar_du_vsp.
		 */
		if (rcdu->info->gen < 3) {
			drm_crtc_handle_vblank(&rcrtc->crtc);
			rcar_du_crtc_finish_page_flip(rcrtc);
		}

		ret = IRQ_HANDLED;
	}

	return ret;
}
1223
1224 /* -----------------------------------------------------------------------------
1225 * Initialization
1226 */
1227
/**
 * rcar_du_crtc_create - Create and initialize a DU CRTC
 * @rgrp: the DU channel group the CRTC belongs to
 * @swindex: software (logical) index of the CRTC
 * @hwindex: hardware DU channel index (selects registers, clocks and IRQ)
 *
 * Acquires the channel clocks, initializes the rcar_du_crtc fields, registers
 * the CRTC with the DRM core with the generation-appropriate function table,
 * optionally wires up the CMM color management unit, and requests the CRTC
 * interrupt. Returns 0 on success or a negative error code.
 */
int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
			unsigned int hwindex)
{
	static const unsigned int mmio_offsets[] = {
		DU0_REG_OFFSET, DU1_REG_OFFSET, DU2_REG_OFFSET, DU3_REG_OFFSET
	};

	struct rcar_du_device *rcdu = rgrp->dev;
	struct platform_device *pdev = to_platform_device(rcdu->dev);
	struct rcar_du_crtc *rcrtc = &rcdu->crtcs[swindex];
	struct drm_crtc *crtc = &rcrtc->crtc;
	struct drm_plane *primary;
	unsigned int irqflags;
	struct clk *clk;
	/* Sized for the longest name, "dclkin.%u" with a single-digit index. */
	char clk_name[9];
	char *name;
	int irq;
	int ret;

	/* Get the CRTC clock and the optional external clock. */
	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_CLOCK)) {
		sprintf(clk_name, "du.%u", hwindex);
		name = clk_name;
	} else {
		/* A NULL name requests the device's sole (unnamed) clock. */
		name = NULL;
	}

	rcrtc->clock = devm_clk_get(rcdu->dev, name);
	if (IS_ERR(rcrtc->clock)) {
		dev_err(rcdu->dev, "no clock for DU channel %u\n", hwindex);
		return PTR_ERR(rcrtc->clock);
	}

	/* The external dotclock input is optional on most channels. */
	sprintf(clk_name, "dclkin.%u", hwindex);
	clk = devm_clk_get(rcdu->dev, clk_name);
	if (!IS_ERR(clk)) {
		rcrtc->extclock = clk;
	} else if (PTR_ERR(clk) == -EPROBE_DEFER) {
		return -EPROBE_DEFER;
	} else if (rcdu->info->dpll_mask & BIT(hwindex)) {
		/*
		 * DU channels that have a display PLL can't use the internal
		 * system clock and thus require an external clock.
		 */
		ret = PTR_ERR(clk);
		dev_err(rcdu->dev, "can't get dclkin.%u: %d\n", hwindex, ret);
		return ret;
	}

	init_waitqueue_head(&rcrtc->flip_wait);
	init_waitqueue_head(&rcrtc->vblank_wait);
	spin_lock_init(&rcrtc->vblank_lock);

	rcrtc->dev = rcdu;
	rcrtc->group = rgrp;
	rcrtc->mmio_offset = mmio_offsets[hwindex];
	rcrtc->index = hwindex;
	/* Only even-indexed channels carry the DRES (reset) bit in DSYSR. */
	rcrtc->dsysr = rcrtc->index % 2 ? 0 : DSYSR_DRES;

	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_TVM_SYNC))
		rcrtc->dsysr |= DSYSR_TVM_TVSYNC;

	/* The primary plane comes from the VSP when it acts as the source. */
	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
		primary = &rcrtc->vsp->planes[rcrtc->vsp_pipe].plane;
	else
		primary = &rgrp->planes[swindex % 2].plane;

	ret = drm_crtc_init_with_planes(&rcdu->ddev, crtc, primary, NULL,
					rcdu->info->gen <= 2 ?
					&crtc_funcs_gen2 : &crtc_funcs_gen3,
					NULL);
	if (ret < 0)
		return ret;

	/* CMM might be disabled for this CRTC. */
	if (rcdu->cmms[swindex]) {
		rcrtc->cmm = rcdu->cmms[swindex];
		rgrp->cmms_mask |= BIT(hwindex % 2);

		drm_mode_crtc_set_gamma_size(crtc, CM2_LUT_SIZE);
		drm_crtc_enable_color_mgmt(crtc, 0, false, CM2_LUT_SIZE);
	}

	drm_crtc_helper_add(crtc, &crtc_helper_funcs);

	/* Register the interrupt handler. */
	if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ)) {
		/* The IRQ's are associated with the CRTC (sw)index. */
		irq = platform_get_irq(pdev, swindex);
		irqflags = 0;
	} else {
		/* A single shared IRQ serves all CRTCs. */
		irq = platform_get_irq(pdev, 0);
		irqflags = IRQF_SHARED;
	}

	if (irq < 0) {
		dev_err(rcdu->dev, "no IRQ for CRTC %u\n", swindex);
		return irq;
	}

	ret = devm_request_irq(rcdu->dev, irq, rcar_du_crtc_irq, irqflags,
			       dev_name(rcdu->dev), rcrtc);
	if (ret < 0) {
		dev_err(rcdu->dev,
			"failed to register IRQ for CRTC %u\n", swindex);
		return ret;
	}

	rcar_du_crtc_crc_init(rcrtc);

	return 0;
}
1340