xref: /linux/drivers/gpu/drm/tegra/hub.c (revision 3590a52f0d0903e600dd01e2cf30820c404beca4)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2017 NVIDIA CORPORATION.  All rights reserved.
4  */
5 
6 #include <linux/clk.h>
7 #include <linux/delay.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/host1x.h>
10 #include <linux/module.h>
11 #include <linux/of.h>
12 #include <linux/of_graph.h>
13 #include <linux/of_platform.h>
14 #include <linux/platform_device.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/reset.h>
17 
18 #include <drm/drm_atomic.h>
19 #include <drm/drm_atomic_helper.h>
20 #include <drm/drm_blend.h>
21 #include <drm/drm_fourcc.h>
22 #include <drm/drm_framebuffer.h>
23 #include <drm/drm_print.h>
24 #include <drm/drm_probe_helper.h>
25 
26 #include "drm.h"
27 #include "dc.h"
28 #include "plane.h"
29 
30 #define NFB 24
31 
/*
 * Pixel formats supported by the shared (window group) planes, advertised
 * to userspace at plane creation time.
 */
static const u32 tegra_shared_plane_formats[] = {
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGBA5551,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	/* new on Tegra114 */
	DRM_FORMAT_ABGR4444,
	DRM_FORMAT_ABGR1555,
	DRM_FORMAT_BGRA5551,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_RGBX5551,
	DRM_FORMAT_XBGR1555,
	DRM_FORMAT_BGRX5551,
	DRM_FORMAT_BGR565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	/* YUV formats (UYVY/YUYV are packed, YUV420/YUV422 are planar) */
	DRM_FORMAT_UYVY,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YUV420,
	DRM_FORMAT_YUV422,
};
55 
/*
 * Format modifiers supported by the shared planes: linear plus the NVIDIA
 * 16Bx2 block-linear layouts with block heights 0-5, each optionally using
 * the GPU sector layout.
 */
static const u64 tegra_shared_plane_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
	/*
	 * The GPU sector layout is only supported on Tegra194, but these will
	 * be filtered out later on by ->format_mod_supported() on SoCs where
	 * it isn't supported.
	 */
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5) | DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT,
	/* sentinel */
	DRM_FORMAT_MOD_INVALID
};
78 
79 static inline unsigned int tegra_plane_offset(struct tegra_plane *plane,
80 					      unsigned int offset)
81 {
82 	if (offset >= 0x500 && offset <= 0x581) {
83 		offset = 0x000 + (offset - 0x500);
84 		return plane->offset + offset;
85 	}
86 
87 	if (offset >= 0x700 && offset <= 0x73c) {
88 		offset = 0x180 + (offset - 0x700);
89 		return plane->offset + offset;
90 	}
91 
92 	if (offset >= 0x800 && offset <= 0x83e) {
93 		offset = 0x1c0 + (offset - 0x800);
94 		return plane->offset + offset;
95 	}
96 
97 	dev_WARN(plane->dc->dev, "invalid offset: %x\n", offset);
98 
99 	return plane->offset + offset;
100 }
101 
/* Read a window register, remapping the offset into the plane's aperture. */
static inline u32 tegra_plane_readl(struct tegra_plane *plane,
				    unsigned int offset)
{
	return tegra_dc_readl(plane->dc, tegra_plane_offset(plane, offset));
}
107 
/* Write a window register, remapping the offset into the plane's aperture. */
static inline void tegra_plane_writel(struct tegra_plane *plane, u32 value,
				      unsigned int offset)
{
	tegra_dc_writel(plane->dc, value, tegra_plane_offset(plane, offset));
}
113 
114 static int tegra_windowgroup_enable(struct tegra_windowgroup *wgrp)
115 {
116 	int err = 0;
117 
118 	mutex_lock(&wgrp->lock);
119 
120 	if (wgrp->usecount == 0) {
121 		err = host1x_client_resume(wgrp->parent);
122 		if (err < 0) {
123 			dev_err(wgrp->parent->dev, "failed to resume: %d\n", err);
124 			goto unlock;
125 		}
126 
127 		reset_control_deassert(wgrp->rst);
128 	}
129 
130 	wgrp->usecount++;
131 
132 unlock:
133 	mutex_unlock(&wgrp->lock);
134 	return err;
135 }
136 
137 static void tegra_windowgroup_disable(struct tegra_windowgroup *wgrp)
138 {
139 	int err;
140 
141 	mutex_lock(&wgrp->lock);
142 
143 	if (wgrp->usecount == 1) {
144 		err = reset_control_assert(wgrp->rst);
145 		if (err < 0) {
146 			pr_err("failed to assert reset for window group %u\n",
147 			       wgrp->index);
148 		}
149 
150 		host1x_client_suspend(wgrp->parent);
151 	}
152 
153 	wgrp->usecount--;
154 	mutex_unlock(&wgrp->lock);
155 }
156 
157 int tegra_display_hub_prepare(struct tegra_display_hub *hub)
158 {
159 	unsigned int i;
160 
161 	/*
162 	 * XXX Enabling/disabling windowgroups needs to happen when the owner
163 	 * display controller is disabled. There's currently no good point at
164 	 * which this could be executed, so unconditionally enable all window
165 	 * groups for now.
166 	 */
167 	for (i = 0; i < hub->soc->num_wgrps; i++) {
168 		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
169 
170 		/* Skip orphaned window group whose parent DC is disabled */
171 		if (wgrp->parent)
172 			tegra_windowgroup_enable(wgrp);
173 	}
174 
175 	return 0;
176 }
177 
178 void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
179 {
180 	unsigned int i;
181 
182 	/*
183 	 * XXX Remove this once window groups can be more fine-grainedly
184 	 * enabled and disabled.
185 	 */
186 	for (i = 0; i < hub->soc->num_wgrps; i++) {
187 		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
188 
189 		/* Skip orphaned window group whose parent DC is disabled */
190 		if (wgrp->parent)
191 			tegra_windowgroup_disable(wgrp);
192 	}
193 }
194 
195 static void tegra_shared_plane_update(struct tegra_plane *plane)
196 {
197 	struct tegra_dc *dc = plane->dc;
198 	unsigned long timeout;
199 	u32 mask, value;
200 
201 	mask = COMMON_UPDATE | WIN_A_UPDATE << plane->base.index;
202 	tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
203 
204 	timeout = jiffies + msecs_to_jiffies(1000);
205 
206 	while (time_before(jiffies, timeout)) {
207 		value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
208 		if ((value & mask) == 0)
209 			break;
210 
211 		usleep_range(100, 400);
212 	}
213 }
214 
215 static void tegra_shared_plane_activate(struct tegra_plane *plane)
216 {
217 	struct tegra_dc *dc = plane->dc;
218 	unsigned long timeout;
219 	u32 mask, value;
220 
221 	mask = COMMON_ACTREQ | WIN_A_ACT_REQ << plane->base.index;
222 	tegra_dc_writel(dc, mask, DC_CMD_STATE_CONTROL);
223 
224 	timeout = jiffies + msecs_to_jiffies(1000);
225 
226 	while (time_before(jiffies, timeout)) {
227 		value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
228 		if ((value & mask) == 0)
229 			break;
230 
231 		usleep_range(100, 400);
232 	}
233 }
234 
/*
 * Return the pipe index of the head that currently owns this window, as
 * read from the window group's SET_CONTROL register via @dc's aperture.
 * OWNER_MASK indicates that the window is unowned.
 */
static unsigned int
tegra_shared_plane_get_owner(struct tegra_plane *plane, struct tegra_dc *dc)
{
	unsigned int offset =
		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);

	return tegra_dc_readl(dc, offset) & OWNER_MASK;
}
243 
244 static bool tegra_dc_owns_shared_plane(struct tegra_dc *dc,
245 				       struct tegra_plane *plane)
246 {
247 	struct device *dev = dc->dev;
248 
249 	if (tegra_shared_plane_get_owner(plane, dc) == dc->pipe) {
250 		if (plane->dc == dc)
251 			return true;
252 
253 		dev_WARN(dev, "head %u owns window %u but is not attached\n",
254 			 dc->pipe, plane->index);
255 	}
256 
257 	return false;
258 }
259 
/*
 * Reassign ownership of @plane to head @new, or release it when @new is
 * NULL (ownership is then set to OWNER_MASK, i.e. "unowned"). Returns
 * -EBUSY if a different head still owns the window.
 */
static int tegra_shared_plane_set_owner(struct tegra_plane *plane,
					struct tegra_dc *new)
{
	unsigned int offset =
		tegra_plane_offset(plane, DC_WIN_CORE_WINDOWGROUP_SET_CONTROL);
	/* when releasing ownership, access the register via the old head */
	struct tegra_dc *old = plane->dc, *dc = new ? new : old;
	struct device *dev = new ? new->dev : old->dev;
	unsigned int owner, index = plane->index;
	u32 value;

	value = tegra_dc_readl(dc, offset);
	owner = value & OWNER_MASK;

	/* refuse to steal a window that another head still owns */
	if (new && (owner != OWNER_MASK && owner != new->pipe)) {
		dev_WARN(dev, "window %u owned by head %u\n", index, owner);
		return -EBUSY;
	}

	/*
	 * This seems to happen whenever the head has been disabled with one
	 * or more windows being active. This is harmless because we'll just
	 * reassign the window to the new head anyway.
	 */
	if (old && owner == OWNER_MASK)
		dev_dbg(dev, "window %u not owned by head %u but %u\n", index,
			old->pipe, owner);

	value &= ~OWNER_MASK;

	if (new)
		value |= OWNER(new->pipe);
	else
		value |= OWNER_MASK;

	tegra_dc_writel(dc, value, offset);

	/* keep the software bookkeeping in sync with the hardware */
	plane->dc = new;

	return 0;
}
300 
/*
 * Load the input scaler coefficient RAM for this window. The table holds
 * three banks of 16 phase rows with 4 coefficient words each (3 * 16 * 4 =
 * 192 entries); the bank is selected by bits 7:6 of the index, the row by
 * bits 5:2 and the column by bits 1:0.
 */
static void tegra_shared_plane_setup_scaler(struct tegra_plane *plane)
{
	static const unsigned int coeffs[192] = {
		0x00000000, 0x3c70e400, 0x3bb037e4, 0x0c51cc9c,
		0x00100001, 0x3bf0dbfa, 0x3d00f406, 0x3fe003ff,
		0x00300002, 0x3b80cbf5, 0x3da1040d, 0x3fb003fe,
		0x00400002, 0x3b20bff1, 0x3e511015, 0x3f9003fc,
		0x00500002, 0x3ad0b3ed, 0x3f21201d, 0x3f5003fb,
		0x00500003, 0x3aa0a3e9, 0x3ff13026, 0x3f2007f9,
		0x00500403, 0x3a7097e6, 0x00e1402f, 0x3ee007f7,
		0x00500403, 0x3a608be4, 0x01d14c38, 0x3ea00bf6,
		0x00500403, 0x3a507fe2, 0x02e15c42, 0x3e500ff4,
		0x00500402, 0x3a6073e1, 0x03f16c4d, 0x3e000ff2,
		0x00400402, 0x3a706be0, 0x05117858, 0x3db013f0,
		0x00300402, 0x3a905fe0, 0x06318863, 0x3d6017ee,
		0x00300402, 0x3ab057e0, 0x0771986e, 0x3d001beb,
		0x00200001, 0x3af04fe1, 0x08a1a47a, 0x3cb023e9,
		0x00100001, 0x3b2047e2, 0x09e1b485, 0x3c6027e7,
		0x00100000, 0x3b703fe2, 0x0b11c091, 0x3c002fe6,
		0x3f203800, 0x0391103f, 0x3ff0a014, 0x0811606c,
		0x3f2037ff, 0x0351083c, 0x03e11842, 0x3f203c00,
		0x3f302fff, 0x03010439, 0x04311c45, 0x3f104401,
		0x3f302fff, 0x02c0fc35, 0x04812448, 0x3f104802,
		0x3f4027ff, 0x0270f832, 0x04c1284b, 0x3f205003,
		0x3f4023ff, 0x0230f030, 0x0511304e, 0x3f205403,
		0x3f601fff, 0x01f0e82d, 0x05613451, 0x3f205c04,
		0x3f701bfe, 0x01b0e02a, 0x05a13c54, 0x3f306006,
		0x3f7017fe, 0x0170d827, 0x05f14057, 0x3f406807,
		0x3f8017ff, 0x0140d424, 0x0641445a, 0x3f406c08,
		0x3fa013ff, 0x0100cc22, 0x0681485d, 0x3f507409,
		0x3fa00fff, 0x00d0c41f, 0x06d14c60, 0x3f607c0b,
		0x3fc00fff, 0x0090bc1c, 0x07115063, 0x3f80840c,
		0x3fd00bff, 0x0070b41a, 0x07515465, 0x3f908c0e,
		0x3fe007ff, 0x0040b018, 0x07915868, 0x3fb0900f,
		0x3ff00400, 0x0010a816, 0x07d15c6a, 0x3fd09811,
		0x00a04c0e, 0x0460f442, 0x0240a827, 0x05c15859,
		0x0090440d, 0x0440f040, 0x0480fc43, 0x00b05010,
		0x0080400c, 0x0410ec3e, 0x04910044, 0x00d05411,
		0x0070380b, 0x03f0e83d, 0x04b10846, 0x00e05812,
		0x0060340a, 0x03d0e43b, 0x04d10c48, 0x00f06013,
		0x00503009, 0x03b0e039, 0x04e11449, 0x01106415,
		0x00402c08, 0x0390d838, 0x05011c4b, 0x01206c16,
		0x00302807, 0x0370d436, 0x0511204c, 0x01407018,
		0x00302406, 0x0340d034, 0x0531244e, 0x01507419,
		0x00202005, 0x0320cc32, 0x05412c50, 0x01707c1b,
		0x00101c04, 0x0300c431, 0x05613451, 0x0180801d,
		0x00101803, 0x02e0c02f, 0x05713853, 0x01a0881e,
		0x00101002, 0x02b0bc2d, 0x05814054, 0x01c08c20,
		0x00000c02, 0x02a0b82c, 0x05914455, 0x01e09421,
		0x00000801, 0x0280b02a, 0x05a14c57, 0x02009c23,
		0x00000400, 0x0260ac28, 0x05b15458, 0x0220a025,
	};
	unsigned int ratio, row, column;

	for (ratio = 0; ratio <= 2; ratio++) {
		for (row = 0; row <= 15; row++) {
			for (column = 0; column <= 3; column++) {
				/* index encodes bank (ratio), row and column */
				unsigned int index = (ratio << 6) + (row << 2) + column;
				u32 value;

				value = COEFF_INDEX(index) | COEFF_DATA(coeffs[index]);
				tegra_plane_writel(plane, value,
						   DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_COEFF);
			}
		}
	}
}
368 
/*
 * Attach @plane to head @dc: take ownership of the window (if not already
 * owned) and program the IHUB/precomp defaults for it, then latch and
 * activate the new configuration.
 */
static void tegra_dc_assign_shared_plane(struct tegra_dc *dc,
					 struct tegra_plane *plane)
{
	u32 value;
	int err;

	if (!tegra_dc_owns_shared_plane(dc, plane)) {
		err = tegra_shared_plane_set_owner(plane, dc);
		if (err < 0)
			return;
	}

	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);
	value |= MODE_FOUR_LINES;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_LINEBUF_CONFIG);

	/*
	 * NOTE(review): the read result is discarded here and below (value is
	 * overwritten) — presumably the reads are intentional register
	 * accesses, but confirm they aren't meant to be read-modify-writes.
	 */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);
	value = SLOTS(1);
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_FETCH_METER);

	/* disable watermark */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);
	value &= ~LATENCY_CTL_MODE_ENABLE;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLA);

	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);
	value |= WATERMARK_MASK;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_LATENCY_CTLB);

	/* pipe meter */
	value = tegra_plane_readl(plane, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);
	value = PIPE_METER_INT(0) | PIPE_METER_FRAC(0);
	tegra_plane_writel(plane, value, DC_WIN_CORE_PRECOMP_WGRP_PIPE_METER);

	/* mempool entries */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);
	value = MEMPOOL_ENTRIES(0x331);
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_WGRP_POOL_CONFIG);

	/* one thread group per window, numbered by the window index */
	value = tegra_plane_readl(plane, DC_WIN_CORE_IHUB_THREAD_GROUP);
	value &= ~THREAD_NUM_MASK;
	value |= THREAD_NUM(plane->base.index);
	value |= THREAD_GROUP_ENABLE;
	tegra_plane_writel(plane, value, DC_WIN_CORE_IHUB_THREAD_GROUP);

	tegra_shared_plane_setup_scaler(plane);

	tegra_shared_plane_update(plane);
	tegra_shared_plane_activate(plane);
}
419 
/* Detach @plane from its head by releasing window ownership. */
static void tegra_dc_remove_shared_plane(struct tegra_dc *dc,
					 struct tegra_plane *plane)
{
	tegra_shared_plane_set_owner(plane, NULL);
}
425 
426 static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
427 					   struct drm_atomic_state *state)
428 {
429 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
430 										 plane);
431 	struct tegra_plane_state *plane_state = to_tegra_plane_state(new_plane_state);
432 	struct tegra_shared_plane *tegra = to_tegra_shared_plane(plane);
433 	struct tegra_bo_tiling *tiling = &plane_state->tiling;
434 	struct tegra_dc *dc = to_tegra_dc(new_plane_state->crtc);
435 	int err;
436 
437 	/* no need for further checks if the plane is being disabled */
438 	if (!new_plane_state->crtc || !new_plane_state->fb)
439 		return 0;
440 
441 	err = tegra_plane_format(new_plane_state->fb->format->format,
442 				 &plane_state->format,
443 				 &plane_state->swap);
444 	if (err < 0)
445 		return err;
446 
447 	err = tegra_fb_get_tiling(new_plane_state->fb, tiling);
448 	if (err < 0)
449 		return err;
450 
451 	if (tiling->mode == TEGRA_BO_TILING_MODE_BLOCK &&
452 	    !dc->soc->supports_block_linear) {
453 		DRM_ERROR("hardware doesn't support block linear mode\n");
454 		return -EINVAL;
455 	}
456 
457 	if (tiling->sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU &&
458 	    !dc->soc->supports_sector_layout) {
459 		DRM_ERROR("hardware doesn't support GPU sector layout\n");
460 		return -EINVAL;
461 	}
462 
463 	/*
464 	 * Tegra doesn't support different strides for U and V planes so we
465 	 * error out if the user tries to display a framebuffer with such a
466 	 * configuration.
467 	 */
468 	if (new_plane_state->fb->format->num_planes > 2) {
469 		if (new_plane_state->fb->pitches[2] != new_plane_state->fb->pitches[1]) {
470 			DRM_ERROR("unsupported UV-plane configuration\n");
471 			return -EINVAL;
472 		}
473 	}
474 
475 	/* XXX scaling is not yet supported, add a check here */
476 
477 	err = tegra_plane_state_add(&tegra->base, new_plane_state);
478 	if (err < 0)
479 		return err;
480 
481 	return 0;
482 }
483 
484 static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
485 					      struct drm_atomic_state *state)
486 {
487 	struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
488 									   plane);
489 	struct tegra_plane *p = to_tegra_plane(plane);
490 	struct tegra_dc *dc;
491 	u32 value;
492 	int err;
493 
494 	/* rien ne va plus */
495 	if (!old_state || !old_state->crtc)
496 		return;
497 
498 	dc = to_tegra_dc(old_state->crtc);
499 
500 	err = host1x_client_resume(&dc->client);
501 	if (err < 0) {
502 		dev_err(dc->dev, "failed to resume: %d\n", err);
503 		return;
504 	}
505 
506 	/*
507 	 * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
508 	 * on planes that are already disabled. Make sure we fallback to the
509 	 * head for this particular state instead of crashing.
510 	 */
511 	if (WARN_ON(p->dc == NULL))
512 		p->dc = dc;
513 
514 	value = tegra_plane_readl(p, DC_WIN_WIN_OPTIONS);
515 	value &= ~WIN_ENABLE;
516 	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
517 
518 	tegra_dc_remove_shared_plane(dc, p);
519 
520 	host1x_client_suspend(&dc->client);
521 }
522 
523 static inline u32 compute_phase_incr(fixed20_12 in, unsigned int out)
524 {
525 	u64 tmp, tmp1;
526 
527 	tmp = (u64)dfixed_trunc(in);
528 	tmp1 = (tmp << NFB) + ((u64)out >> 1);
529 	do_div(tmp1, out);
530 
531 	return lower_32_bits(tmp1);
532 }
533 
/*
 * Program a shared plane for scanout: acquire window ownership, then set up
 * blending, scaling, surface addresses, format, tiling and color handling
 * from the new plane state. The owning head is resumed for the duration of
 * the register accesses.
 */
static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
					     struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
									   plane);
	struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
	struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
	unsigned int zpos = new_state->normalized_zpos;
	struct drm_framebuffer *fb = new_state->fb;
	struct tegra_plane *p = to_tegra_plane(plane);
	u32 value, min_width, bypass = 0;
	dma_addr_t base, addr_flag = 0;
	unsigned int bpc, planes;
	bool yuv;
	int err;

	/* rien ne va plus */
	if (!new_state->crtc || !new_state->fb)
		return;

	/* a plane that isn't visible is equivalent to a disabled one */
	if (!new_state->visible) {
		tegra_shared_plane_atomic_disable(plane, state);
		return;
	}

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	yuv = tegra_plane_format_is_yuv(tegra_plane_state->format, &planes, &bpc);

	tegra_dc_assign_shared_plane(dc, p);

	tegra_plane_writel(p, VCOUNTER, DC_WIN_CORE_ACT_CONTROL);

	/* blending */
	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
	tegra_plane_writel(p, value, DC_WIN_BLEND_MATCH_SELECT);

	value = BLEND_FACTOR_DST_ALPHA_ZERO | BLEND_FACTOR_SRC_ALPHA_K2 |
		BLEND_FACTOR_DST_COLOR_NEG_K1_TIMES_SRC |
		BLEND_FACTOR_SRC_COLOR_K1_TIMES_SRC;
	tegra_plane_writel(p, value, DC_WIN_BLEND_NOMATCH_SELECT);

	/* lower zpos means deeper in the stack, hence the inversion */
	value = K2(255) | K1(255) | WINDOW_LAYER_DEPTH(255 - zpos);
	tegra_plane_writel(p, value, DC_WIN_BLEND_LAYER_CONTROL);

	/* scaling */
	min_width = min(new_state->src_w >> 16, new_state->crtc_w);

	value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPC);

	if (min_width < MAX_PIXELS_5TAP444(value)) {
		value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
	} else {
		value = tegra_plane_readl(p, DC_WINC_PRECOMP_WGRP_PIPE_CAPE);

		if (min_width < MAX_PIXELS_2TAP444(value))
			value = HORIZONTAL_TAPS_2 | VERTICAL_TAPS_2;
		else
			dev_err(dc->dev, "invalid minimum width: %u\n", min_width);
	}

	/*
	 * NOTE(review): the taps value computed above is unconditionally
	 * overwritten here, so the capability check only gates the error
	 * message — confirm this is intentional.
	 */
	value = HORIZONTAL_TAPS_5 | VERTICAL_TAPS_5;
	tegra_plane_writel(p, value, DC_WIN_WINDOWGROUP_SET_CONTROL_INPUT_SCALER);

	if (new_state->src_w != new_state->crtc_w << 16) {
		fixed20_12 width = dfixed_init(new_state->src_w >> 16);
		u32 incr = compute_phase_incr(width, new_state->crtc_w) & ~0x1;
		u32 init = (1 << (NFB - 1)) + (incr >> 1);

		tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_HPHASE_INCR);
		tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_H_START_PHASE);
	} else {
		bypass |= INPUT_SCALER_HBYPASS;
	}

	if (new_state->src_h != new_state->crtc_h << 16) {
		fixed20_12 height = dfixed_init(new_state->src_h >> 16);
		u32 incr = compute_phase_incr(height, new_state->crtc_h) & ~0x1;
		u32 init = (1 << (NFB - 1)) + (incr >> 1);

		tegra_plane_writel(p, incr, DC_WIN_SET_INPUT_SCALER_VPHASE_INCR);
		tegra_plane_writel(p, init, DC_WIN_SET_INPUT_SCALER_V_START_PHASE);
	} else {
		bypass |= INPUT_SCALER_VBYPASS;
	}

	tegra_plane_writel(p, bypass, DC_WIN_WINDOWGROUP_SET_INPUT_SCALER_USAGE);

	/* disable compression */
	tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	/*
	 * Physical address bit 39 in Tegra194 is used as a switch for special
	 * logic that swizzles the memory using either the legacy Tegra or the
	 * dGPU sector layout.
	 */
	if (tegra_plane_state->tiling.sector_layout == TEGRA_BO_SECTOR_LAYOUT_GPU)
		addr_flag = BIT_ULL(39);
#endif

	base = tegra_plane_state->iova[0] + fb->offsets[0];
	base |= addr_flag;

	tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
	tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);

	value = V_POSITION(new_state->crtc_y) |
		H_POSITION(new_state->crtc_x);
	tegra_plane_writel(p, value, DC_WIN_POSITION);

	value = V_SIZE(new_state->crtc_h) | H_SIZE(new_state->crtc_w);
	tegra_plane_writel(p, value, DC_WIN_SIZE);

	value = WIN_ENABLE | COLOR_EXPAND;
	tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);

	value = V_SIZE(new_state->src_h >> 16) | H_SIZE(new_state->src_w >> 16);
	tegra_plane_writel(p, value, DC_WIN_CROPPED_SIZE);

	tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI);
	tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR);

	value = PITCH(fb->pitches[0]);
	tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE);

	/* chroma plane addresses and pitches (multi-planar YUV only) */
	if (yuv && planes > 1) {
		base = tegra_plane_state->iova[1] + fb->offsets[1];
		base |= addr_flag;

		tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_U);
		tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_U);

		if (planes > 2) {
			base = tegra_plane_state->iova[2] + fb->offsets[2];
			base |= addr_flag;

			tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI_V);
			tegra_plane_writel(p, lower_32_bits(base), DC_WINBUF_START_ADDR_V);
		}

		value = PITCH_U(fb->pitches[1]);

		if (planes > 2)
			value |= PITCH_V(fb->pitches[2]);

		tegra_plane_writel(p, value, DC_WIN_PLANAR_STORAGE_UV);
	} else {
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_U);
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_U);
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_V);
		tegra_plane_writel(p, 0, DC_WINBUF_START_ADDR_HI_V);
		tegra_plane_writel(p, 0, DC_WIN_PLANAR_STORAGE_UV);
	}

	value = CLAMP_BEFORE_BLEND | INPUT_RANGE_FULL;

	if (yuv) {
		if (bpc < 12)
			value |= DEGAMMA_YUV8_10;
		else
			value |= DEGAMMA_YUV12;

		/* XXX parameterize */
		value |= COLOR_SPACE_YUV_2020;
	} else {
		if (!tegra_plane_format_is_indexed(tegra_plane_state->format))
			value |= DEGAMMA_SRGB;
	}

	tegra_plane_writel(p, value, DC_WIN_SET_PARAMS);

	/*
	 * NOTE(review): OFFSET_X is fed the vertical (src_y) offset and
	 * OFFSET_Y the horizontal one — presumably this matches the macro
	 * definitions in dc.h; confirm.
	 */
	value = OFFSET_X(new_state->src_y >> 16) |
		OFFSET_Y(new_state->src_x >> 16);
	tegra_plane_writel(p, value, DC_WINBUF_CROPPED_POINT);

	if (dc->soc->supports_block_linear) {
		unsigned long height = tegra_plane_state->tiling.value;

		/* XXX */
		switch (tegra_plane_state->tiling.mode) {
		case TEGRA_BO_TILING_MODE_PITCH:
			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(0) |
				DC_WINBUF_SURFACE_KIND_PITCH;
			break;

		/* XXX not supported on Tegra186 and later */
		case TEGRA_BO_TILING_MODE_TILED:
			value = DC_WINBUF_SURFACE_KIND_TILED;
			break;

		case TEGRA_BO_TILING_MODE_BLOCK:
			value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(height) |
				DC_WINBUF_SURFACE_KIND_BLOCK;
			break;
		}

		tegra_plane_writel(p, value, DC_WINBUF_SURFACE_KIND);
	}

	/* disable gamut CSC */
	value = tegra_plane_readl(p, DC_WIN_WINDOW_SET_CONTROL);
	value &= ~CONTROL_CSC_ENABLE;
	tegra_plane_writel(p, value, DC_WIN_WINDOW_SET_CONTROL);

	host1x_client_suspend(&dc->client);
}
747 
/*
 * Plane helper callbacks for shared (window group) planes; ->prepare_fb()
 * and ->cleanup_fb() come from the common tegra_plane_* helpers.
 */
static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = {
	.prepare_fb = tegra_plane_prepare_fb,
	.cleanup_fb = tegra_plane_cleanup_fb,
	.atomic_check = tegra_shared_plane_atomic_check,
	.atomic_update = tegra_shared_plane_atomic_update,
	.atomic_disable = tegra_shared_plane_atomic_disable,
};
755 
756 struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
757 					    struct tegra_dc *dc,
758 					    unsigned int wgrp,
759 					    unsigned int index,
760 					    enum drm_plane_type type)
761 {
762 	struct tegra_drm *tegra = drm->dev_private;
763 	struct tegra_display_hub *hub = tegra->hub;
764 	struct tegra_shared_plane *plane;
765 	unsigned int possible_crtcs;
766 	unsigned int num_formats;
767 	const u64 *modifiers;
768 	struct drm_plane *p;
769 	const u32 *formats;
770 	int err;
771 
772 	plane = kzalloc_obj(*plane);
773 	if (!plane)
774 		return ERR_PTR(-ENOMEM);
775 
776 	plane->base.offset = 0x0a00 + 0x0300 * index;
777 	plane->base.index = index;
778 
779 	plane->wgrp = &hub->wgrps[wgrp];
780 	plane->wgrp->parent = &dc->client;
781 
782 	p = &plane->base.base;
783 
784 	/* planes can be assigned to arbitrary CRTCs */
785 	possible_crtcs = BIT(tegra->num_crtcs) - 1;
786 
787 	num_formats = ARRAY_SIZE(tegra_shared_plane_formats);
788 	formats = tegra_shared_plane_formats;
789 	modifiers = tegra_shared_plane_modifiers;
790 
791 	err = drm_universal_plane_init(drm, p, possible_crtcs,
792 				       &tegra_plane_funcs, formats,
793 				       num_formats, modifiers, type, NULL);
794 	if (err < 0) {
795 		kfree(plane);
796 		return ERR_PTR(err);
797 	}
798 
799 	drm_plane_helper_add(p, &tegra_shared_plane_helper_funcs);
800 	drm_plane_create_zpos_property(p, 0, 0, 255);
801 
802 	return p;
803 }
804 
805 static struct drm_private_state *
806 tegra_display_hub_duplicate_state(struct drm_private_obj *obj)
807 {
808 	struct tegra_display_hub_state *state;
809 
810 	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
811 	if (!state)
812 		return NULL;
813 
814 	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
815 
816 	return &state->base;
817 }
818 
/* Free a hub private state previously created or duplicated. */
static void tegra_display_hub_destroy_state(struct drm_private_obj *obj,
					    struct drm_private_state *state)
{
	kfree(to_tegra_display_hub_state(state));
}
827 
828 static struct drm_private_state *
829 tegra_display_hub_create_state(struct drm_private_obj *obj)
830 {
831 	struct tegra_display_hub_state *hub_state;
832 
833 	hub_state = kzalloc_obj(*hub_state);
834 	if (!hub_state)
835 		return ERR_PTR(-ENOMEM);
836 
837 	__drm_atomic_helper_private_obj_create_state(obj, &hub_state->base);
838 
839 	return &hub_state->base;
840 }
841 
/* Private-object state management callbacks for the display hub. */
static const struct drm_private_state_funcs tegra_display_hub_state_funcs = {
	.atomic_create_state = tegra_display_hub_create_state,
	.atomic_duplicate_state = tegra_display_hub_duplicate_state,
	.atomic_destroy_state = tegra_display_hub_destroy_state,
};
847 
848 static struct tegra_display_hub_state *
849 tegra_display_hub_get_state(struct tegra_display_hub *hub,
850 			    struct drm_atomic_state *state)
851 {
852 	struct drm_private_state *priv;
853 
854 	priv = drm_atomic_get_private_obj_state(state, &hub->base);
855 	if (IS_ERR(priv))
856 		return ERR_CAST(priv);
857 
858 	return to_tegra_display_hub_state(priv);
859 }
860 
861 int tegra_display_hub_atomic_check(struct drm_device *drm,
862 				   struct drm_atomic_state *state)
863 {
864 	struct tegra_drm *tegra = drm->dev_private;
865 	struct tegra_display_hub_state *hub_state;
866 	struct drm_crtc_state *old, *new;
867 	struct drm_crtc *crtc;
868 	unsigned int i;
869 
870 	if (!tegra->hub)
871 		return 0;
872 
873 	hub_state = tegra_display_hub_get_state(tegra->hub, state);
874 	if (IS_ERR(hub_state))
875 		return PTR_ERR(hub_state);
876 
877 	/*
878 	 * The display hub display clock needs to be fed by the display clock
879 	 * with the highest frequency to ensure proper functioning of all the
880 	 * displays.
881 	 *
882 	 * Note that this isn't used before Tegra186, but it doesn't hurt and
883 	 * conditionalizing it would make the code less clean.
884 	 */
885 	for_each_oldnew_crtc_in_state(state, crtc, old, new, i) {
886 		struct tegra_dc_state *dc = to_dc_state(new);
887 
888 		if (new->active) {
889 			if (!hub_state->clk || dc->pclk > hub_state->rate) {
890 				hub_state->dc = to_tegra_dc(dc->base.crtc);
891 				hub_state->clk = hub_state->dc->clk;
892 				hub_state->rate = dc->pclk;
893 			}
894 		}
895 	}
896 
897 	return 0;
898 }
899 
/*
 * Program the common IHUB settings via head @dc and issue a COMMON update/
 * activation cycle. The head is resumed for the duration of the accesses.
 */
static void tegra_display_hub_update(struct tegra_dc *dc)
{
	u32 value;
	int err;

	err = host1x_client_resume(&dc->client);
	if (err < 0) {
		dev_err(dc->dev, "failed to resume: %d\n", err);
		return;
	}

	value = tegra_dc_readl(dc, DC_CMD_IHUB_COMMON_MISC_CTL);
	value &= ~LATENCY_EVENT;
	tegra_dc_writel(dc, value, DC_CMD_IHUB_COMMON_MISC_CTL);

	/* NOTE(review): read result discarded; value is rebuilt from scratch */
	value = tegra_dc_readl(dc, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);
	value = CURS_SLOTS(1) | WGRP_SLOTS(1);
	tegra_dc_writel(dc, value, DC_DISP_IHUB_COMMON_DISPLAY_FETCH_METER);

	/* reads after each write — presumably to flush posted writes; confirm */
	tegra_dc_writel(dc, COMMON_UPDATE, DC_CMD_STATE_CONTROL);
	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
	tegra_dc_writel(dc, COMMON_ACTREQ, DC_CMD_STATE_CONTROL);
	tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);

	host1x_client_suspend(&dc->client);
}
926 
927 void tegra_display_hub_atomic_commit(struct drm_device *drm,
928 				     struct drm_atomic_state *state)
929 {
930 	struct tegra_drm *tegra = drm->dev_private;
931 	struct tegra_display_hub *hub = tegra->hub;
932 	struct tegra_display_hub_state *hub_state;
933 	struct device *dev = hub->client.dev;
934 	int err;
935 
936 	hub_state = to_tegra_display_hub_state(hub->base.state);
937 
938 	if (hub_state->clk) {
939 		err = clk_set_rate(hub_state->clk, hub_state->rate);
940 		if (err < 0)
941 			dev_err(dev, "failed to set rate of %pC to %lu Hz\n",
942 				hub_state->clk, hub_state->rate);
943 
944 		err = clk_set_parent(hub->clk_disp, hub_state->clk);
945 		if (err < 0)
946 			dev_err(dev, "failed to set parent of %pC to %pC: %d\n",
947 				hub->clk_disp, hub_state->clk, err);
948 	}
949 
950 	if (hub_state->dc)
951 		tegra_display_hub_update(hub_state->dc);
952 }
953 
954 static int tegra_display_hub_init(struct host1x_client *client)
955 {
956 	struct tegra_display_hub *hub = to_tegra_display_hub(client);
957 	struct drm_device *drm = dev_get_drvdata(client->host);
958 	struct tegra_drm *tegra = drm->dev_private;
959 
960 	drm_atomic_private_obj_init(drm, &hub->base,
961 				    &tegra_display_hub_state_funcs);
962 
963 	tegra->hub = hub;
964 
965 	return 0;
966 }
967 
968 static int tegra_display_hub_exit(struct host1x_client *client)
969 {
970 	struct drm_device *drm = dev_get_drvdata(client->host);
971 	struct tegra_drm *tegra = drm->dev_private;
972 
973 	drm_atomic_private_obj_fini(&tegra->hub->base);
974 	tegra->hub = NULL;
975 
976 	return 0;
977 }
978 
979 static int tegra_display_hub_runtime_suspend(struct host1x_client *client)
980 {
981 	struct tegra_display_hub *hub = to_tegra_display_hub(client);
982 	struct device *dev = client->dev;
983 	unsigned int i = hub->num_heads;
984 	int err;
985 
986 	err = reset_control_assert(hub->rst);
987 	if (err < 0)
988 		return err;
989 
990 	while (i--)
991 		clk_disable_unprepare(hub->clk_heads[i]);
992 
993 	clk_disable_unprepare(hub->clk_hub);
994 	clk_disable_unprepare(hub->clk_dsc);
995 	clk_disable_unprepare(hub->clk_disp);
996 
997 	pm_runtime_put_sync(dev);
998 
999 	return 0;
1000 }
1001 
/*
 * host1x client ->resume callback: take a runtime PM reference, ungate the
 * hub clocks (disp, dsc, hub, then one clock per head) and release the hub
 * from reset. On any failure, everything already enabled is unwound in
 * reverse order via the goto chain below.
 */
static int tegra_display_hub_runtime_resume(struct host1x_client *client)
{
	struct tegra_display_hub *hub = to_tegra_display_hub(client);
	struct device *dev = client->dev;
	unsigned int i;
	int err;

	err = pm_runtime_resume_and_get(dev);
	if (err < 0) {
		dev_err(dev, "failed to get runtime PM: %d\n", err);
		return err;
	}

	err = clk_prepare_enable(hub->clk_disp);
	if (err < 0)
		goto put_rpm;

	err = clk_prepare_enable(hub->clk_dsc);
	if (err < 0)
		goto disable_disp;

	err = clk_prepare_enable(hub->clk_hub);
	if (err < 0)
		goto disable_dsc;

	for (i = 0; i < hub->num_heads; i++) {
		err = clk_prepare_enable(hub->clk_heads[i]);
		if (err < 0)
			goto disable_heads;
	}

	err = reset_control_deassert(hub->rst);
	if (err < 0)
		goto disable_heads;

	return 0;

	/*
	 * disable_heads is shared by the in-loop failure (i indexes the clock
	 * that failed, so only clocks [0, i) are disabled) and the reset
	 * failure after the loop (i == num_heads, so all heads are disabled).
	 */
disable_heads:
	while (i--)
		clk_disable_unprepare(hub->clk_heads[i]);

	clk_disable_unprepare(hub->clk_hub);
disable_dsc:
	clk_disable_unprepare(hub->clk_dsc);
disable_disp:
	clk_disable_unprepare(hub->clk_disp);
put_rpm:
	pm_runtime_put_sync(dev);
	return err;
}
1052 
/* host1x client callbacks for the display hub (see implementations above) */
static const struct host1x_client_ops tegra_display_hub_ops = {
	.init = tegra_display_hub_init,
	.exit = tegra_display_hub_exit,
	.suspend = tegra_display_hub_runtime_suspend,
	.resume = tegra_display_hub_runtime_resume,
};
1059 
1060 static int tegra_display_hub_probe(struct platform_device *pdev)
1061 {
1062 	u64 dma_mask = dma_get_mask(pdev->dev.parent);
1063 	struct device_node *child = NULL;
1064 	struct tegra_display_hub *hub;
1065 	struct clk *clk;
1066 	unsigned int i;
1067 	int err;
1068 
1069 	err = dma_coerce_mask_and_coherent(&pdev->dev, dma_mask);
1070 	if (err < 0) {
1071 		dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err);
1072 		return err;
1073 	}
1074 
1075 	hub = devm_kzalloc(&pdev->dev, sizeof(*hub), GFP_KERNEL);
1076 	if (!hub)
1077 		return -ENOMEM;
1078 
1079 	hub->soc = of_device_get_match_data(&pdev->dev);
1080 
1081 	hub->clk_disp = devm_clk_get(&pdev->dev, "disp");
1082 	if (IS_ERR(hub->clk_disp)) {
1083 		err = PTR_ERR(hub->clk_disp);
1084 		return err;
1085 	}
1086 
1087 	if (hub->soc->supports_dsc) {
1088 		hub->clk_dsc = devm_clk_get(&pdev->dev, "dsc");
1089 		if (IS_ERR(hub->clk_dsc)) {
1090 			err = PTR_ERR(hub->clk_dsc);
1091 			return err;
1092 		}
1093 	}
1094 
1095 	hub->clk_hub = devm_clk_get(&pdev->dev, "hub");
1096 	if (IS_ERR(hub->clk_hub)) {
1097 		err = PTR_ERR(hub->clk_hub);
1098 		return err;
1099 	}
1100 
1101 	hub->rst = devm_reset_control_get(&pdev->dev, "misc");
1102 	if (IS_ERR(hub->rst)) {
1103 		err = PTR_ERR(hub->rst);
1104 		return err;
1105 	}
1106 
1107 	hub->wgrps = devm_kcalloc(&pdev->dev, hub->soc->num_wgrps,
1108 				  sizeof(*hub->wgrps), GFP_KERNEL);
1109 	if (!hub->wgrps)
1110 		return -ENOMEM;
1111 
1112 	for (i = 0; i < hub->soc->num_wgrps; i++) {
1113 		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
1114 		char id[16];
1115 
1116 		snprintf(id, sizeof(id), "wgrp%u", i);
1117 		mutex_init(&wgrp->lock);
1118 		wgrp->usecount = 0;
1119 		wgrp->index = i;
1120 
1121 		wgrp->rst = devm_reset_control_get(&pdev->dev, id);
1122 		if (IS_ERR(wgrp->rst))
1123 			return PTR_ERR(wgrp->rst);
1124 
1125 		err = reset_control_assert(wgrp->rst);
1126 		if (err < 0)
1127 			return err;
1128 	}
1129 
1130 	hub->num_heads = of_get_child_count(pdev->dev.of_node);
1131 
1132 	hub->clk_heads = devm_kcalloc(&pdev->dev, hub->num_heads, sizeof(clk),
1133 				      GFP_KERNEL);
1134 	if (!hub->clk_heads)
1135 		return -ENOMEM;
1136 
1137 	for (i = 0; i < hub->num_heads; i++) {
1138 		child = of_get_next_child(pdev->dev.of_node, child);
1139 		if (!child) {
1140 			dev_err(&pdev->dev, "failed to find node for head %u\n",
1141 				i);
1142 			return -ENODEV;
1143 		}
1144 
1145 		clk = devm_get_clk_from_child(&pdev->dev, child, "dc");
1146 		if (IS_ERR(clk)) {
1147 			dev_err(&pdev->dev, "failed to get clock for head %u\n",
1148 				i);
1149 			of_node_put(child);
1150 			return PTR_ERR(clk);
1151 		}
1152 
1153 		hub->clk_heads[i] = clk;
1154 	}
1155 
1156 	of_node_put(child);
1157 
1158 	/* XXX: enable clock across reset? */
1159 	err = reset_control_assert(hub->rst);
1160 	if (err < 0)
1161 		return err;
1162 
1163 	platform_set_drvdata(pdev, hub);
1164 	pm_runtime_enable(&pdev->dev);
1165 
1166 	INIT_LIST_HEAD(&hub->client.list);
1167 	hub->client.ops = &tegra_display_hub_ops;
1168 	hub->client.dev = &pdev->dev;
1169 
1170 	err = host1x_client_register(&hub->client);
1171 	if (err < 0)
1172 		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
1173 			err);
1174 
1175 	err = devm_of_platform_populate(&pdev->dev);
1176 	if (err < 0)
1177 		goto unregister;
1178 
1179 	return err;
1180 
1181 unregister:
1182 	host1x_client_unregister(&hub->client);
1183 	pm_runtime_disable(&pdev->dev);
1184 	return err;
1185 }
1186 
1187 static void tegra_display_hub_remove(struct platform_device *pdev)
1188 {
1189 	struct tegra_display_hub *hub = platform_get_drvdata(pdev);
1190 	unsigned int i;
1191 
1192 	host1x_client_unregister(&hub->client);
1193 
1194 	for (i = 0; i < hub->soc->num_wgrps; i++) {
1195 		struct tegra_windowgroup *wgrp = &hub->wgrps[i];
1196 
1197 		mutex_destroy(&wgrp->lock);
1198 	}
1199 
1200 	pm_runtime_disable(&pdev->dev);
1201 }
1202 
/* Tegra186: six window groups; the "dsc" clock is requested in probe */
static const struct tegra_display_hub_soc tegra186_display_hub = {
	.num_wgrps = 6,
	.supports_dsc = true,
};

/* Tegra194: six window groups; no "dsc" clock */
static const struct tegra_display_hub_soc tegra194_display_hub = {
	.num_wgrps = 6,
	.supports_dsc = false,
};
1212 
1213 static const struct of_device_id tegra_display_hub_of_match[] = {
1214 	{
1215 		.compatible = "nvidia,tegra194-display",
1216 		.data = &tegra194_display_hub
1217 	}, {
1218 		.compatible = "nvidia,tegra186-display",
1219 		.data = &tegra186_display_hub
1220 	}, {
1221 		/* sentinel */
1222 	}
1223 };
1224 MODULE_DEVICE_TABLE(of, tegra_display_hub_of_match);
1225 
/* platform driver, registered by the tegra-drm core (non-static on purpose) */
struct platform_driver tegra_display_hub_driver = {
	.driver = {
		.name = "tegra-display-hub",
		.of_match_table = tegra_display_hub_of_match,
	},
	.probe = tegra_display_hub_probe,
	.remove = tegra_display_hub_remove,
};
1234