xref: /linux/drivers/gpu/drm/tegra/hub.c (revision cbac924200b838cfb8d8b1415113d788089dc50b)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2017 NVIDIA CORPORATION.  All rights reserved.
4  */
5 
6 #include <linux/clk.h>
7 #include <linux/delay.h>
8 #include <linux/host1x.h>
9 #include <linux/module.h>
10 #include <linux/of.h>
11 #include <linux/of_device.h>
12 #include <linux/of_graph.h>
13 #include <linux/platform_device.h>
14 #include <linux/pm_runtime.h>
15 #include <linux/reset.h>
16 
17 #include <drm/drm_atomic.h>
18 #include <drm/drm_atomic_helper.h>
19 #include <drm/drm_fourcc.h>
20 #include <drm/drm_probe_helper.h>
21 
22 #include "drm.h"
23 #include "dc.h"
24 #include "plane.h"
25 
/* Number of fraction bits in the input scaler phase accumulator. */
#define NFB 24
27 
/*
 * Pixel formats accepted by shared (window group) planes. The entries
 * following the marker below were first supported on Tegra114.
 */
static const u32 tegra_shared_plane_formats[] = {
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGBA5551,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	/* new on Tegra114 */
	DRM_FORMAT_ABGR4444,
	DRM_FORMAT_ABGR1555,
	DRM_FORMAT_BGRA5551,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_RGBX5551,
	DRM_FORMAT_XBGR1555,
	DRM_FORMAT_BGRX5551,
	DRM_FORMAT_BGR565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	/* YUV formats: UYVY/YUYV are packed, YUV420/YUV422 are planar */
	DRM_FORMAT_UYVY,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YUV420,
	DRM_FORMAT_YUV422,
};
51 
/*
 * Framebuffer modifiers accepted by shared planes: linear plus the NVIDIA
 * 16Bx2 block-linear layouts (block heights 1..32), each optionally with
 * the GPU sector layout. Terminated by DRM_FORMAT_MOD_INVALID.
 */
static const u64 tegra_shared_plane_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
	/*
	 * The GPU sector layout is only supported on Tegra194, but these will
	 * be filtered out later on by ->format_mod_supported() on SoCs where
	 * it isn't supported.
	 */
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	/* sentinel */
	DRM_FORMAT_MOD_INVALID
};
74 
/*
 * Translate a window register offset from the global register map into the
 * per-window aperture at plane->offset. Three global ranges are remapped:
 * 0x500..0x581 -> +0x000, 0x700..0x73c -> +0x180, 0x800..0x83e -> +0x1c0.
 * Any other offset triggers a warning and is passed through unmodified
 * (still relative to plane->offset).
 */
static inline unsigned int tegra_plane_offset(struct tegra_plane *plane,
					      unsigned int offset)
{
	if (offset >= 0x500 && offset <= 0x581) {
		offset = 0x000 + (offset - 0x500);
		return plane->offset + offset;
	}

	if (offset >= 0x700 && offset <= 0x73c) {
		offset = 0x180 + (offset - 0x700);
		return plane->offset + offset;
	}

	if (offset >= 0x800 && offset <= 0x83e) {
		offset = 0x1c0 + (offset - 0x800);
		return plane->offset + offset;
	}

	dev_WARN(plane->dc->dev, "invalid offset: %x\n", offset);

	return plane->offset + offset;
}
97 
/* Read a per-window register, translating @offset via tegra_plane_offset(). */
static inline u32 tegra_plane_readl(struct tegra_plane *plane,
				    unsigned int offset)
{
	return tegra_dc_readl(plane->dc, tegra_plane_offset(plane, offset));
}
103 
/* Write a per-window register, translating @offset via tegra_plane_offset(). */
static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
				      unsigned int offset)
{
	tegra_dc_writel(plane->dc, value, tegra_plane_offset(plane, offset));
}
109 
110 static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp)
111 {
112 	int err = 0;
113 
114 	mutex_lock(&wgrp->lock);
115 
116 	if (wgrp->usecount == 0) {
117 		err = host1x_client_resume(wgrp->parent);
118 		if (err < 0) {
119 			dev_err(wgrp->parent->dev, "failed to resume: %d\n", err);
120 			goto unlock;
121 		}
122 
123 		reset_control_deassert(wgrp->rst);
124 	}
125 
126 	wgrp->usecount++;
127 
128 unlock:
129 	mutex_unlock(&wgrp->lock);
130 	return err;
131 }
132 
133 static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
134 {
135 	int err;
136 
137 	mutex_lock(&wgrp->lock);
138 
139 	if (wgrp->usecount == 1) {
140 		err = reset_control_assert(wgrp->rst);
141 		if (err < 0) {
142 			pr_err("failed to assert reset for window group %u\n",
143 			       wgrp->index);
144 		}
145 
146 		host1x_client_suspend(wgrp->parent);
147 	}
148 
149 	wgrp->usecount--;
150 	mutex_unlock(&wgrp->lock);
151 }
152 
153 int tegra_display_hub_prepare(struct tegra_display_hub *hub)
154 {
155 	unsigned int i;
156 
157 	/*
158 	 * XXX Enabling/disabling windowgroups needs to happen when the owner
159 	 * display controller is disabled. There's currently no good point at
160 	 * which this could be executed, so unconditionally enable all window
161 	 * groups for now.
162 	 */
163 	for (i = 0; i < hub->soc->num_wgrps; i++) {
164 		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
165 
166 		/* Skip orphaned window group whose parent DC is disabled */
167 		if (wgrp->parent)
168 			tegra_windowgroup_enable(wgrp);
169 	}
170 
171 	return 0;
172 }
173 
174 void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
175 {
176 	unsigned int i;
177 
178 	/*
179 	 * XXX Remove this once window groups can be more fine-grainedly
180 	 * enabled and disabled.
181 	 */
182 	for (i = 0; i < hub->soc->num_wgrps; i++) {
183 		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
184 
185 		/* Skip orphaned window group whose parent DC is disabled */
186 		if (wgrp->parent)
187 			tegra_windowgroup_disable(wgrp);
188 	}
189 }
190 
191 static void tegra_shared_plane_update(struct tegra_plane *plane)
192 {
193 	struct tegra_dc *dc = plane->dc;
194 	unsigned long timeout;
195 	u32 mask, value;
196 
197 	mask = COMMON_UPDATE | WIN_A_UPDATE << plane->base.index;
198 	tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
199 
200 	timeout = jiffies + msecs_to_jiffies(1000);
201 
202 	while (time_before(jiffies, timeout)) {
203 		value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
204 		if ((value & mask) == 0)
205 			break;
206 
207 		usleep_range(100, 400);
208 	}
209 }
210 
211 static void tegra_shared_plane_activate(struct tegra_plane *plane)
212 {
213 	struct tegra_dc *dc = plane->dc;
214 	unsigned long timeout;
215 	u32 mask, value;
216 
217 	mask = COMMON_ACTREQ | WIN_A_ACT_REQ << plane->base.index;
218 	tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
219 
220 	timeout = jiffies + msecs_to_jiffies(1000);
221 
222 	while (time_before(jiffies, timeout)) {
223 		value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
224 		if ((value & mask) == 0)
225 			break;
226 
227 		usleep_range(100, 400);
228 	}
229 }
230 
231 static unsigned int
232 tegra_shared_plane_get_owner(struct tegra_plane *plane, struct tegra_dc *dc)
233 {
234 	unsigned int offset =
235 		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
236 
237 	return tegra_dc_readl(dc, offset) & OWNER_MASK;
238 }
239 
240 static bool tegra_dc_owns_shared_plane(struct tegra_dc *dc,
241 				       struct tegra_plane *plane)
242 {
243 	struct device *dev = dc->dev;
244 
245 	if (tegra_shared_plane_get_owner(plane, dc) == dc->pipe) {
246 		if (plane->dc == dc)
247 			return true;
248 
249 		dev_WARN(dev, "head %u owns window %u but is not attached\n",
250 			 dc->pipe, plane->index);
251 	}
252 
253 	return false;
254 }
255 
/*
 * Transfer ownership of @plane's window group to head @new, or release it
 * when @new is NULL. The register access goes through @new when attaching
 * and through the old owner when detaching. Returns 0 on success or
 * -EBUSY if the window is currently owned by a different head.
 */
static int tegra_shared_plane_set_owner(struct tegra_plane *plane,
					struct tegra_dc *new)
{
	unsigned int offset =
		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
	struct tegra_dc *old = plane->dc, *dc = new ? new : old;
	struct device *dev = new ? new->dev : old->dev;
	unsigned int owner, index = plane->index;
	u32 value;

	value = tegra_dc_readl(dc, offset);
	owner = value & OWNER_MASK;

	/* refuse to steal a window that another head currently owns */
	if (new && (owner != OWNER_MASK && owner != new->pipe)) {
		dev_WARN(dev, "window %u owned by head %u\n", index, owner);
		return -EBUSY;
	}

	/*
	 * This seems to happen whenever the head has been disabled with one
	 * or more windows being active. This is harmless because we'll just
	 * reassign the window to the new head anyway.
	 */
	if (old && owner == OWNER_MASK)
		dev_dbg(dev, "window %u not owned by head %u but %u\n", index,
			old->pipe, owner);

	value &= ~OWNER_MASK;

	/* OWNER_MASK in the owner field marks the window as unowned */
	if (new)
		value |= OWNER(new->pipe);
	else
		value |= OWNER_MASK;

	tegra_dc_writel(dc, value, offset);

	plane->dc = new;

	return 0;
}
296 
/*
 * Program the input scaler coefficient RAM for @plane. The table holds
 * three sets (one per scaling ratio, selected by the top index bits) of
 * 16 rows by 4 columns; each entry is written through the indexed
 * SET_INPUT_SCALER_COEFF register as COEFF_INDEX | COEFF_DATA.
 */
static void tegra_shared_plane_setup_scaler(struct tegra_plane *plane)
{
	static const unsigned int coeffs[192] = {
		0x00000000, 0x3c70e400, 0x3bb037e4, 0x0c51cc9c,
		0x00100001, 0x3bf0dbfa, 0x3d00f406, 0x3fe003ff,
		0x00300002, 0x3b80cbf5, 0x3da1040d, 0x3fb003fe,
		0x00400002, 0x3b20bff1, 0x3e511015, 0x3f9003fc,
		0x00500002, 0x3ad0b3ed, 0x3f21201d, 0x3f5003fb,
		0x00500003, 0x3aa0a3e9, 0x3ff13026, 0x3f2007f9,
		0x00500403, 0x3a7097e6, 0x00e1402f, 0x3ee007f7,
		0x00500403, 0x3a608be4, 0x01d14c38, 0x3ea00bf6,
		0x00500403, 0x3a507fe2, 0x02e15c42, 0x3e500ff4,
		0x00500402, 0x3a6073e1, 0x03f16c4d, 0x3e000ff2,
		0x00400402, 0x3a706be0, 0x05117858, 0x3db013f0,
		0x00300402, 0x3a905fe0, 0x06318863, 0x3d6017ee,
		0x00300402, 0x3ab057e0, 0x0771986e, 0x3d001beb,
		0x00200001, 0x3af04fe1, 0x08a1a47a, 0x3cb023e9,
		0x00100001, 0x3b2047e2, 0x09e1b485, 0x3c6027e7,
		0x00100000, 0x3b703fe2, 0x0b11c091, 0x3c002fe6,
		0x3f203800, 0x0391103f, 0x3ff0a014, 0x0811606c,
		0x3f2037ff, 0x0351083c, 0x03e11842, 0x3f203c00,
		0x3f302fff, 0x03010439, 0x04311c45, 0x3f104401,
		0x3f302fff, 0x02c0fc35, 0x04812448, 0x3f104802,
		0x3f4027ff, 0x0270f832, 0x04c1284b, 0x3f205003,
		0x3f4023ff, 0x0230f030, 0x0511304e, 0x3f205403,
		0x3f601fff, 0x01f0e82d, 0x05613451, 0x3f205c04,
		0x3f701bfe, 0x01b0e02a, 0x05a13c54, 0x3f306006,
		0x3f7017fe, 0x0170d827, 0x05f14057, 0x3f406807,
		0x3f8017ff, 0x0140d424, 0x0641445a, 0x3f406c08,
		0x3fa013ff, 0x0100cc22, 0x0681485d, 0x3f507409,
		0x3fa00fff, 0x00d0c41f, 0x06d14c60, 0x3f607c0b,
		0x3fc00fff, 0x0090bc1c, 0x07115063, 0x3f80840c,
		0x3fd00bff, 0x0070b41a, 0x07515465, 0x3f908c0e,
		0x3fe007ff, 0x0040b018, 0x07915868, 0x3fb0900f,
		0x3ff00400, 0x0010a816, 0x07d15c6a, 0x3fd09811,
		0x00a04c0e, 0x0460f442, 0x0240a827, 0x05c15859,
		0x0090440d, 0x0440f040, 0x0480fc43, 0x00b05010,
		0x0080400c, 0x0410ec3e, 0x04910044, 0x00d05411,
		0x0070380b, 0x03f0e83d, 0x04b10846, 0x00e05812,
		0x0060340a, 0x03d0e43b, 0x04d10c48, 0x00f06013,
		0x00503009, 0x03b0e039, 0x04e11449, 0x01106415,
		0x00402c08, 0x0390d838, 0x05011c4b, 0x01206c16,
		0x00302807, 0x0370d436, 0x0511204c, 0x01407018,
		0x00302406, 0x0340d034, 0x0531244e, 0x01507419,
		0x00202005, 0x0320cc32, 0x05412c50, 0x01707c1b,
		0x00101c04, 0x0300c431, 0x05613451, 0x0180801d,
		0x00101803, 0x02e0c02f, 0x05713853, 0x01a0881e,
		0x00101002, 0x02b0bc2d, 0x05814054, 0x01c08c20,
		0x00000c02, 0x02a0b82c, 0x05914455, 0x01e09421,
		0x00000801, 0x0280b02a, 0x05a14c57, 0x02009c23,
		0x00000400, 0x0260ac28, 0x05b15458, 0x0220a025,
	};
	unsigned int ratio, row, column;

	for (ratio = 0; ratio <= 2; ratio++) {
		for (row = 0; row <= 15; row++) {
			for (column = 0; column <= 3; column++) {
				/* index layout: ratio in bits 7:6, row in 5:2, column in 1:0 */
				unsigned int index = (ratio << 6) + (row << 2) + column;
				u32 value;

				value = COEFF_INDEX(index) | COEFF_DATA(coeffs[index]);
				tegra_plane_writel(plane, value,
						   DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_COEFF);
			}
		}
	}
}
364 
/*
 * Attach @plane to head @dc: claim ownership of its window group (if not
 * already owned), program the initial IHUB/precomp configuration, load the
 * scaler coefficients and latch/activate the new state. Fails silently if
 * ownership cannot be taken.
 */
static void tegra_dc_assign_shared_plane(struct tegra_dc *dc,
					 struct tegra_plane *plane)
{
	u32 value;
	int err;

	if (!tegra_dc_owns_shared_plane(dc, plane)) {
		err = tegra_shared_plane_set_owner(plane, dc);
		if (err < 0)
			return;
	}

	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
	value |= MODE_FOUR_LINES;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);

	/*
	 * NOTE(review): here and below, several register reads are performed
	 * and their results immediately overwritten (value = SLOTS(1), etc.).
	 * Presumably only the read's side effect (if any) matters, or the
	 * reads are leftovers — confirm before cleaning up.
	 */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
	value = SLOTS(1);
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);

	/* disable watermark */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
	value &= ~LATENCY_CTL_MODE_ENABLE;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);

	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
	value |= WATERMARK_MASK;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);

	/* pipe meter */
	value = tegra_plane_readl(plane, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
	value = PIPE_METER_INT(0) | PIPE_METER_FRAC(0);
	tegra_plane_writel(plane, value, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);

	/* mempool entries */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
	value = MEMPOOL_ENTRIES(0x331);
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);

	/* one thread group per window, numbered after the plane index */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_THREAD_GROUP);
	value &= ~THREAD_NUM_MASK;
	value |= THREAD_NUM(plane->base.index);
	value |= THREAD_GROUP_ENABLE;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_THREAD_GROUP);

	tegra_shared_plane_setup_scaler(plane);

	tegra_shared_plane_update(plane);
	tegra_shared_plane_activate(plane);
}
415 
/* Detach @plane from @dc by releasing window group ownership (owner = none). */
static void tegra_dc_remove_shared_plane(struct tegra_dc *dc,
					 struct tegra_plane *plane)
{
	tegra_shared_plane_set_owner(plane, NULL);
}
421 
/*
 * Atomic-check hook for shared planes: validate format, tiling and UV
 * pitch constraints of the new plane state and register it with the CRTC
 * state. Returns 0 on success or a negative error code.
 */
static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
					   struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct tegra_plane_state *plane_state = to_tegra_plane_state(new_plane_state);
	struct tegra_shared_plane *tegra = to_tegra_shared_plane(plane);
	struct tegra_bo_tiling *tiling = &plane_state->tiling;
	struct tegra_dc *dc = to_tegra_dc(new_plane_state->crtc);
	int err;

	/* no need for further checks if the plane is being disabled */
	if (!new_plane_state->crtc || !new_plane_state->fb)
		return 0;

	/* translate the DRM fourcc into the hardware format/swap values */
	err = tegra_plane_format(new_plane_state->fb->format->format,
				 &plane_state->format,
				 &plane_state->swap);
	if (err < 0)
		return err;

	err = tegra_fb_get_tiling(new_plane_state->fb, tiling);
	if (err < 0)
		return err;

	if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK &&
	    !dc->soc->supports_block_linear) {
		DRM_ERROR("hardware doesn't support block linear mode\n");
		return -EINVAL;
	}

	if (tiling->sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU &&
	    !dc->soc->supports_sector_layout) {
		DRM_ERROR("hardware doesn't support GPU sector layout\n");
		return -EINVAL;
	}

	/*
	 * Tegra doesn't support different strides for U and V planes so we
	 * error out if the user tries to display a framebuffer with such a
	 * configuration.
	 */
	if (new_plane_state->fb->format->num_planes > 2) {
		if (new_plane_state->fb->pitches[2] != new_plane_state->fb->pitches[1]) {
			DRM_ERROR("unsupported UV-plane configuration\n");
			return -EINVAL;
		}
	}

	/* XXX scaling is not yet supported, add a check here */

	err = tegra_plane_state_add(&tegra->base, new_plane_state);
	if (err < 0)
		return err;

	return 0;
}
479 
/*
 * Atomic-disable hook for shared planes: clear the window's enable bit and
 * release ownership of the window group. The owning DC is resumed around
 * the register accesses and suspended again afterwards.
 */
static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
					      struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
									   plane);
	struct tegra_plane *p = to_tegra_plane(plane);
	struct tegra_dc *dc;
	u32 value;
	int err;

	/* rien ne va plus */
	if (!old_state || !old_state->crtc)
		return;

	dc = to_tegra_dc(old_state->crtc);

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	/*
	 * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
	 * on planes that are already disabled. Make sure we fallback to the
	 * head for this particular state instead of crashing.
	 */
	if (WARN_ON(p->dc == NULL))
		p->dc = dc;

	value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
	value &= ~WIN_ENABLE;
	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);

	tegra_dc_remove_shared_plane(dc, p);

	host1x_client_suspend(&dc->client);
}
518 
519 static inline u32 compute_phase_incr(fixed20_12 in, unsigned int out)
520 {
521 	u64 tmp, tmp1, tmp2;
522 
523 	tmp = (u64)dfixed_trunc(in);
524 	tmp2 = (u64)out;
525 	tmp1 = (tmp << NFB) + (tmp2 >> 1);
526 	do_div(tmp1, tmp2);
527 
528 	return lower_32_bits(tmp1);
529 }
530 
/*
 * Atomic-update hook for shared planes: assign the window to the new head
 * and program blending, scaling, surface addresses, tiling and color
 * parameters from the new plane state. Falls back to atomic-disable when
 * the plane is not visible.
 */
static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
					     struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
	struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
	unsigned int zpos = new_state->normalized_zpos;
	struct drm_framebuffer *fb = new_state->fb;
	struct tegra_plane *p = to_tegra_plane(plane);
	u32 value, min_width, bypass = 0;
	dma_addr_t base, addr_flag = 0;
	unsigned int bpc, planes;
	bool yuv;
	int err;

	/* rien ne va plus */
	if (!new_state->crtc || !new_state->fb)
		return;

	if (!new_state->visible) {
		tegra_shared_plane_atomic_disable(plane, state);
		return;
	}

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	yuv = tegra_plane_format_is_yuv(tegra_plane_state->format, &planes, &bpc);

	tegra_dc_assign_shared_plane(dc, p);

	tegra_plane_writel(p, VCOUNTER, DC_WIN_CORE_ACT_CONTROL);

	/* blending */
	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
	tegra_plane_writel(p, value, DC_WIN_BLEND_MATCH_SELECT);

	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
	tegra_plane_writel(p, value, DC_WIN_BLEND_NOMATCH_SELECT);

	/* deeper layer depth means further back; zpos 0 is the back-most */
	value = K2(255) | K1(255) | WINDOW_LAYER_DEPTH(255 - zpos);
	tegra_plane_writel(p, value, DC_WIN_BLEND_LAYER_CONTROL);

	/* scaling */
	min_width = min(new_state->src_w >> 16, new_state->crtc_w);

	value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPC);

	if (min_width < MAX_PIXELS_5TAP444(value)) {
		value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
	} else {
		value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPE);

		if (min_width < MAX_PIXELS_2TAP444(value))
			value = HORIZONTAL_TAPS_2 | VERTICAL_TAPS_2;
		else
			dev_err(dc->dev, "invalid minimum width: %u\n", min_width);
	}

	/*
	 * NOTE(review): this unconditionally overwrites the taps selection
	 * computed just above, so 5-tap is always programmed — confirm
	 * whether the capability check is meant to take effect.
	 */
	value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
	tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_CONTROL_INPUT_SCALER);

	/* horizontal scaling: program phase increment or bypass */
	if (new_state->src_w != new_state->crtc_w << 16) {
		fixed20_12 width = dfixed_init(new_state->src_w >> 16);
		u32 incr = compute_phase_incr(width, new_state->crtc_w) & ~0x1;
		u32 init = (1 << (NFB - 1)) + (incr >> 1);

		tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_HPHASE_INCR);
		tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_H_START_PHASE);
	} else {
		bypass |= INPUT_SCALER_HBYPASS;
	}

	/* vertical scaling: program phase increment or bypass */
	if (new_state->src_h != new_state->crtc_h << 16) {
		fixed20_12 height = dfixed_init(new_state->src_h >> 16);
		u32 incr = compute_phase_incr(height, new_state->crtc_h) & ~0x1;
		u32 init = (1 << (NFB - 1)) + (incr >> 1);

		tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_VPHASE_INCR);
		tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_V_START_PHASE);
	} else {
		bypass |= INPUT_SCALER_VBYPASS;
	}

	tegra_plane_writel(p, bypass, DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_USAGE);

	/* disable compression */
	tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	/*
	 * Physical address bit 39 in Tegra194 is used as a switch for special
	 * logic that swizzles the memory using either the legacy Tegra or the
	 * dGPU sector layout.
	 */
	if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
		addr_flag = BIT_ULL(39);
#endif

	base = tegra_plane_state->iova[0] + fb->offsets[0];
	base |= addr_flag;

	tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
	tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);

	value = V_POSITION(new_state->crtc_y) |
		H_POSITION(new_state->crtc_x);
	tegra_plane_writel(p, value, DC_WIN_POSITION);

	value = V_SIZE(new_state->crtc_h) | H_SIZE(new_state->crtc_w);
	tegra_plane_writel(p, value, DC_WIN_SIZE);

	value = WIN_ENABLE | COLOR_EXPAND;
	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);

	value = V_SIZE(new_state->src_h >> 16) | H_SIZE(new_state->src_w >> 16);
	tegra_plane_writel(p, value, DC_WIN_CROPPED_SIZE);

	tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI);
	tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR);

	value = PITCH(fb->pitches[0]);
	tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE);

	if (yuv && planes > 1) {
		/* program U (and, for 3-plane formats, V) surface addresses */
		base = tegra_plane_state->iova[1] + fb->offsets[1];
		base |= addr_flag;

		tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_U);
		tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_U);

		if (planes > 2) {
			base = tegra_plane_state->iova[2] + fb->offsets[2];
			base |= addr_flag;

			tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_V);
			tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_V);
		}

		value = PITCH_U(fb->pitches[1]);

		if (planes > 2)
			value |= PITCH_V(fb->pitches[2]);

		tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE_UV);
	} else {
		/* non-planar: clear the chroma surface registers */
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_U);
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_U);
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_V);
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_V);
		tegra_plane_writel(p, 0, DC_WIN_PLANAR_STORAGE_UV);
	}

	value = CLAMP_BEFORE_BLEND | INPUT_RANGE_FULL;

	if (yuv) {
		if (bpc < 12)
			value |= DEGAMMA_YUV8_10;
		else
			value |= DEGAMMA_YUV12;

		/* XXX parameterize */
		value |= COLOR_SPACE_YUV_2020;
	} else {
		if (!tegra_plane_format_is_indexed(tegra_plane_state->format))
			value |= DEGAMMA_SRGB;
	}

	tegra_plane_writel(p, value, DC_WIN_SET_PARAMS);

	/*
	 * NOTE(review): OFFSET_X is fed src_y and OFFSET_Y src_x — looks
	 * swapped; confirm against the macro definitions / TRM before
	 * changing.
	 */
	value = OFFSET_X(new_state->src_y >> 16) |
		OFFSET_Y(new_state->src_x >> 16);
	tegra_plane_writel(p, value, DC_WINBUF_CROPPED_POINT);

	if (dc->soc->supports_block_linear) {
		unsigned long height = tegra_plane_state->tiling.value;

		/* XXX */
		switch (tegra_plane_state->tiling.mode) {
		case TEGRA_BO_TILING_MODE_PITCH:
			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(0) |
				DC_WINBUF_SURFACE_KIND_PITCH;
			break;

		/* XXX not supported on Tegra186 and later */
		case TEGRA_BO_TILING_MODE_TILED:
			value = DC_WINBUF_SURFACE_KIND_TILED;
			break;

		case TEGRA_BO_TILING_MODE_BLOCK:
			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
				DC_WINBUF_SURFACE_KIND_BLOCK;
			break;
		}

		tegra_plane_writel(p, value, DC_WINBUF_SURFACE_KIND);
	}

	/* disable gamut CSC */
	value = tegra_plane_readl(p, DC_WIN_WINDOW_SET_CONTROL);
	value &= ~CONTROL_CSC_ENABLE;
	tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL);

	host1x_client_suspend(&dc->client);
}
744 
/* Atomic plane helper hooks for shared (window group) planes. */
static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
	.prepare_fb = tegra_plane_prepare_fb,
	.cleanup_fb = tegra_plane_cleanup_fb,
	.atomic_check = tegra_shared_plane_atomic_check,
	.atomic_update = tegra_shared_plane_atomic_update,
	.atomic_disable = tegra_shared_plane_atomic_disable,
};
752 
/*
 * Create a shared overlay plane backed by window group @wgrp with hardware
 * window index @index, initially parented to @dc. The plane can be
 * assigned to any CRTC. Returns the new DRM plane or an ERR_PTR on
 * failure.
 */
struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
					    struct tegra_dc *dc,
					    unsigned int wgrp,
					    unsigned int index)
{
	enum drm_plane_type type = DRM_PLANE_TYPE_OVERLAY;
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_display_hub *hub = tegra->hub;
	struct tegra_shared_plane *plane;
	unsigned int possible_crtcs;
	unsigned int num_formats;
	const u64 *modifiers;
	struct drm_plane *p;
	const u32 *formats;
	int err;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	/* per-window register aperture: 0x0a00 plus 0x0300 per window */
	plane->base.offset = 0x0a00 + 0x0300 * index;
	plane->base.index = index;

	plane->wgrp = &hub->wgrps[wgrp];
	plane->wgrp->parent = &dc->client;

	p = &plane->base.base;

	/* planes can be assigned to arbitrary CRTCs */
	possible_crtcs = BIT(tegra->num_crtcs) - 1;

	num_formats = ARRAY_SIZE(tegra_shared_plane_formats);
	formats = tegra_shared_plane_formats;
	modifiers = tegra_shared_plane_modifiers;

	err = drm_universal_plane_init(drm, p, possible_crtcs,
				       &tegra_plane_funcs, formats,
				       num_formats, modifiers, type, NULL);
	if (err < 0) {
		kfree(plane);
		return ERR_PTR(err);
	}

	drm_plane_helper_add(p, &tegra_shared_plane_helper_funcs);
	drm_plane_create_zpos_property(p, 0, 0, 255);

	return p;
}
801 
802 static struct drm_private_state *
803 tegra_display_hub_duplicate_state(struct drm_private_obj *obj)
804 {
805 	struct tegra_display_hub_state *state;
806 
807 	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
808 	if (!state)
809 		return NULL;
810 
811 	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
812 
813 	return &state->base;
814 }
815 
/* Free a duplicated hub private state. */
static void tegra_display_hub_destroy_state(struct drm_private_obj *obj,
					    struct drm_private_state *state)
{
	kfree(to_tegra_display_hub_state(state));
}
824 
/* Private-object state handlers for the display hub's atomic state. */
static const struct drm_private_state_funcs tegra_display_hub_state_funcs = {
	.atomic_duplicate_state = tegra_display_hub_duplicate_state,
	.atomic_destroy_state = tegra_display_hub_destroy_state,
};
829 
/*
 * Get (and duplicate if necessary) the hub's private state for this atomic
 * transaction. Returns an ERR_PTR on failure.
 */
static struct tegra_display_hub_state *
tegra_display_hub_get_state(struct tegra_display_hub *hub,
			    struct drm_atomic_state *state)
{
	struct drm_private_state *priv;

	priv = drm_atomic_get_private_obj_state(state, &hub->base);
	if (IS_ERR(priv))
		return ERR_CAST(priv);

	return to_tegra_display_hub_state(priv);
}
842 
/*
 * Atomic-check for the display hub: pick the active CRTC with the highest
 * pixel clock and record its clock/rate in the hub state so the commit
 * phase can reparent the hub display clock accordingly. No-op when there
 * is no hub (pre-Tegra186).
 */
int tegra_display_hub_atomic_check(struct drm_device *drm,
				   struct drm_atomic_state *state)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_display_hub_state *hub_state;
	struct drm_crtc_state *old, *new;
	struct drm_crtc *crtc;
	unsigned int i;

	if (!tegra->hub)
		return 0;

	hub_state = tegra_display_hub_get_state(tegra->hub, state);
	if (IS_ERR(hub_state))
		return PTR_ERR(hub_state);

	/*
	 * The display hub display clock needs to be fed by the display clock
	 * with the highest frequency to ensure proper functioning of all the
	 * displays.
	 *
	 * Note that this isn't used before Tegra186, but it doesn't hurt and
	 * conditionalizing it would make the code less clean.
	 */
	for_each_oldnew_crtc_in_state(state, crtc, old, new, i) {
		struct tegra_dc_state *dc = to_dc_state(new);

		if (new->active) {
			/* first active CRTC, or a faster one than recorded so far */
			if (!hub_state->clk || dc->pclk > hub_state->rate) {
				hub_state->dc = to_tegra_dc(dc->base.crtc);
				hub_state->clk = hub_state->dc->clk;
				hub_state->rate = dc->pclk;
			}
		}
	}

	return 0;
}
881 
/*
 * Program common IHUB settings through @dc and latch/activate them via
 * COMMON_UPDATE/COMMON_ACTREQ. The DC is resumed for the duration of the
 * register accesses.
 */
static void tegra_display_hub_update(struct tegra_dc *dc)
{
	u32 value;
	int err;

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	value = tegra_dc_readl(dc, DC_CMD_IHUB_COMMON_MISC_CTL);
	value &= ~LATENCY_EVENT;
	tegra_dc_writel(dc, value, DC_CMD_IHUB_COMMON_MISC_CTL);

	/*
	 * NOTE(review): the read result is immediately overwritten here;
	 * presumably only the read's side effect (if any) matters — confirm.
	 */
	value = tegra_dc_readl(dc, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
	value = CURS_SLOTS(1) | WGRP_SLOTS(1);
	tegra_dc_writel(dc, value, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);

	/* each write is followed by a readback to flush it to the hardware */
	tegra_dc_writel(dc, COMMON_UPDATE, DC_CMD_STATE_CONTROL);
	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
	tegra_dc_writel(dc, COMMON_ACTREQ, DC_CMD_STATE_CONTROL);
	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);

	host1x_client_suspend(&dc->client);
}
908 
/*
 * Atomic-commit for the display hub: apply the clock selection computed in
 * tegra_display_hub_atomic_check() (set rate, reparent the hub display
 * clock) and flush the common IHUB state through the selected DC. Clock
 * errors are logged but not fatal.
 */
void tegra_display_hub_atomic_commit(struct drm_device *drm,
				     struct drm_atomic_state *state)
{
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_display_hub *hub = tegra->hub;
	struct tegra_display_hub_state *hub_state;
	struct device *dev = hub->client.dev;
	int err;

	hub_state = to_tegra_display_hub_state(hub->base.state);

	if (hub_state->clk) {
		err = clk_set_rate(hub_state->clk, hub_state->rate);
		if (err < 0)
			dev_err(dev, "failed to set rate of %pC to %lu Hz\n",
				hub_state->clk, hub_state->rate);

		err = clk_set_parent(hub->clk_disp, hub_state->clk);
		if (err < 0)
			dev_err(dev, "failed to set parent of %pC to %pC: %d\n",
				hub->clk_disp, hub_state->clk, err);
	}

	if (hub_state->dc)
		tegra_display_hub_update(hub_state->dc);
}
935 
936 static int tegra_display_hub_init(struct host1x_client *client)
937 {
938 	struct tegra_display_hub *hub = to_tegra_display_hub(client);
939 	struct drm_device *drm = dev_get_drvdata(client->host);
940 	struct tegra_drm *tegra = drm->dev_private;
941 	struct tegra_display_hub_state *state;
942 
943 	state = kzalloc(sizeof(*state), GFP_KERNEL);
944 	if (!state)
945 		return -ENOMEM;
946 
947 	drm_atomic_private_obj_init(drm, &hub->base, &state->base,
948 				    &tegra_display_hub_state_funcs);
949 
950 	tegra->hub = hub;
951 
952 	return 0;
953 }
954 
955 static int tegra_display_hub_exit(struct host1x_client *client)
956 {
957 	struct drm_device *drm = dev_get_drvdata(client->host);
958 	struct tegra_drm *tegra = drm->dev_private;
959 
960 	drm_atomic_private_obj_fini(&tegra->hub->base);
961 	tegra->hub = NULL;
962 
963 	return 0;
964 }
965 
/*
 * host1x client runtime suspend: assert the hub reset, disable the head
 * clocks (in reverse order), then the hub/dsc/disp clocks, and drop the
 * runtime PM reference. Returns 0 on success or the reset error.
 */
static int tegra_display_hub_runtime_suspend(struct host1x_client *client)
{
	struct tegra_display_hub *hub = to_tegra_display_hub(client);
	struct device *dev = client->dev;
	unsigned int i = hub->num_heads;
	int err;

	err = reset_control_assert(hub->rst);
	if (err < 0)
		return err;

	while (i--)
		clk_disable_unprepare(hub->clk_heads[i]);

	clk_disable_unprepare(hub->clk_hub);
	clk_disable_unprepare(hub->clk_dsc);
	clk_disable_unprepare(hub->clk_disp);

	pm_runtime_put_sync(dev);

	return 0;
}
988 
/*
 * Power up the display hub: take a runtime PM reference on the device,
 * ungate the hub clocks (display, DSC, hub core and one clock per head)
 * and finally release the hub reset.
 *
 * On any failure, the clocks enabled so far are gated again in reverse
 * order via the goto chain below and the runtime PM reference is
 * dropped, leaving the hardware exactly as it was on entry.
 */
static int tegra_display_hub_runtime_resume(struct host1x_client *client)
{
	struct tegra_display_hub *hub = to_tegra_display_hub(client);
	struct device *dev = client->dev;
	unsigned int i;
	int err;

	err = pm_runtime_resume_and_get(dev);
	if (err < 0) {
		dev_err(dev, "failed to get runtime PM: %d\n", err);
		return err;
	}

	err = clk_prepare_enable(hub->clk_disp);
	if (err < 0)
		goto put_rpm;

	/* clk_dsc is NULL on SoCs without DSC support; the CCF accepts that */
	err = clk_prepare_enable(hub->clk_dsc);
	if (err < 0)
		goto disable_disp;

	err = clk_prepare_enable(hub->clk_hub);
	if (err < 0)
		goto disable_dsc;

	for (i = 0; i < hub->num_heads; i++) {
		err = clk_prepare_enable(hub->clk_heads[i]);
		if (err < 0)
			goto disable_heads;
	}

	/* i == num_heads here, so a failure unwinds all head clocks below */
	err = reset_control_deassert(hub->rst);
	if (err < 0)
		goto disable_heads;

	return 0;

disable_heads:
	/* disable only the head clocks that were successfully enabled */
	while (i--)
		clk_disable_unprepare(hub->clk_heads[i]);

	clk_disable_unprepare(hub->clk_hub);
disable_dsc:
	clk_disable_unprepare(hub->clk_dsc);
disable_disp:
	clk_disable_unprepare(hub->clk_disp);
put_rpm:
	pm_runtime_put_sync(dev);
	return err;
}
1039 
/* host1x client callbacks wiring the hub into the host1x/DRM lifecycle */
static const struct host1x_client_ops tegra_display_hub_ops = {
	.init = tegra_display_hub_init,
	.exit = tegra_display_hub_exit,
	.suspend = tegra_display_hub_runtime_suspend,
	.resume = tegra_display_hub_runtime_resume,
};
1046 
1047 static int tegra_display_hub_probe(struct platform_device *pdev)
1048 {
1049 	u64 dma_mask = dma_get_mask(pdev->dev.parent);
1050 	struct device_node *child = NULL;
1051 	struct tegra_display_hub *hub;
1052 	struct clk *clk;
1053 	unsigned int i;
1054 	int err;
1055 
1056 	err = dma_coerce_mask_and_coherent(&pdev->dev, dma_mask);
1057 	if (err < 0) {
1058 		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
1059 		return err;
1060 	}
1061 
1062 	hub = devm_kzalloc(&pdev->dev, sizeof(*hub), GFP_KERNEL);
1063 	if (!hub)
1064 		return -ENOMEM;
1065 
1066 	hub->soc = of_device_get_match_data(&pdev->dev);
1067 
1068 	hub->clk_disp = devm_clk_get(&pdev->dev, "disp");
1069 	if (IS_ERR(hub->clk_disp)) {
1070 		err = PTR_ERR(hub->clk_disp);
1071 		return err;
1072 	}
1073 
1074 	if (hub->soc->supports_dsc) {
1075 		hub->clk_dsc = devm_clk_get(&pdev->dev, "dsc");
1076 		if (IS_ERR(hub->clk_dsc)) {
1077 			err = PTR_ERR(hub->clk_dsc);
1078 			return err;
1079 		}
1080 	}
1081 
1082 	hub->clk_hub = devm_clk_get(&pdev->dev, "hub");
1083 	if (IS_ERR(hub->clk_hub)) {
1084 		err = PTR_ERR(hub->clk_hub);
1085 		return err;
1086 	}
1087 
1088 	hub->rst = devm_reset_control_get(&pdev->dev, "misc");
1089 	if (IS_ERR(hub->rst)) {
1090 		err = PTR_ERR(hub->rst);
1091 		return err;
1092 	}
1093 
1094 	hub->wgrps = devm_kcalloc(&pdev->dev, hub->soc->num_wgrps,
1095 				  sizeof(*hub->wgrps), GFP_KERNEL);
1096 	if (!hub->wgrps)
1097 		return -ENOMEM;
1098 
1099 	for (i = 0; i < hub->soc->num_wgrps; i++) {
1100 		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
1101 		char id[8];
1102 
1103 		snprintf(id, sizeof(id), "wgrp%u", i);
1104 		mutex_init(&wgrp->lock);
1105 		wgrp->usecount = 0;
1106 		wgrp->index = i;
1107 
1108 		wgrp->rst = devm_reset_control_get(&pdev->dev, id);
1109 		if (IS_ERR(wgrp->rst))
1110 			return PTR_ERR(wgrp->rst);
1111 
1112 		err = reset_control_assert(wgrp->rst);
1113 		if (err < 0)
1114 			return err;
1115 	}
1116 
1117 	hub->num_heads = of_get_child_count(pdev->dev.of_node);
1118 
1119 	hub->clk_heads = devm_kcalloc(&pdev->dev, hub->num_heads, sizeof(clk),
1120 				      GFP_KERNEL);
1121 	if (!hub->clk_heads)
1122 		return -ENOMEM;
1123 
1124 	for (i = 0; i < hub->num_heads; i++) {
1125 		child = of_get_next_child(pdev->dev.of_node, child);
1126 		if (!child) {
1127 			dev_err(&pdev->dev, "failed to find node for head %u\n",
1128 				i);
1129 			return -ENODEV;
1130 		}
1131 
1132 		clk = devm_get_clk_from_child(&pdev->dev, child, "dc");
1133 		if (IS_ERR(clk)) {
1134 			dev_err(&pdev->dev, "failed to get clock for head %u\n",
1135 				i);
1136 			of_node_put(child);
1137 			return PTR_ERR(clk);
1138 		}
1139 
1140 		hub->clk_heads[i] = clk;
1141 	}
1142 
1143 	of_node_put(child);
1144 
1145 	/* XXX: enable clock across reset? */
1146 	err = reset_control_assert(hub->rst);
1147 	if (err < 0)
1148 		return err;
1149 
1150 	platform_set_drvdata(pdev, hub);
1151 	pm_runtime_enable(&pdev->dev);
1152 
1153 	INIT_LIST_HEAD(&hub->client.list);
1154 	hub->client.ops = &tegra_display_hub_ops;
1155 	hub->client.dev = &pdev->dev;
1156 
1157 	err = host1x_client_register(&hub->client);
1158 	if (err < 0)
1159 		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
1160 			err);
1161 
1162 	err = devm_of_platform_populate(&pdev->dev);
1163 	if (err < 0)
1164 		goto unregister;
1165 
1166 	return err;
1167 
1168 unregister:
1169 	host1x_client_unregister(&hub->client);
1170 	pm_runtime_disable(&pdev->dev);
1171 	return err;
1172 }
1173 
1174 static int tegra_display_hub_remove(struct platform_device *pdev)
1175 {
1176 	struct tegra_display_hub *hub = platform_get_drvdata(pdev);
1177 	unsigned int i;
1178 	int err;
1179 
1180 	err = host1x_client_unregister(&hub->client);
1181 	if (err < 0) {
1182 		dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
1183 			err);
1184 	}
1185 
1186 	for (i = 0; i < hub->soc->num_wgrps; i++) {
1187 		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
1188 
1189 		mutex_destroy(&wgrp->lock);
1190 	}
1191 
1192 	pm_runtime_disable(&pdev->dev);
1193 
1194 	return err;
1195 }
1196 
/* Tegra186: six window groups; the driver also requests a "dsc" clock */
static const struct tegra_display_hub_soc tegra186_display_hub = {
	.num_wgrps = 6,
	.supports_dsc = true,
};
1201 
/* Tegra194: six window groups; no "dsc" clock is requested */
static const struct tegra_display_hub_soc tegra194_display_hub = {
	.num_wgrps = 6,
	.supports_dsc = false,
};
1206 
/* device-tree match table, newest SoC first */
static const struct of_device_id tegra_display_hub_of_match[] = {
	{
		.compatible = "nvidia,tegra194-display",
		.data = &tegra194_display_hub
	}, {
		.compatible = "nvidia,tegra186-display",
		.data = &tegra186_display_hub
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, tegra_display_hub_of_match);
1219 
/* platform driver, registered by the Tegra DRM core (non-static on purpose) */
struct platform_driver tegra_display_hub_driver = {
	.driver = {
		.name = "tegra-display-hub",
		.of_match_table = tegra_display_hub_of_match,
	},
	.probe = tegra_display_hub_probe,
	.remove = tegra_display_hub_remove,
};
1228