xref: /linux/drivers/gpu/drm/vc4/vc4_kms.c (revision 2c1ed907520c50326b8f604907a8478b27881a2e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2015 Broadcom
4  */
5 
6 /**
7  * DOC: VC4 KMS
8  *
9  * This is the general code for implementing KMS mode setting that
10  * doesn't clearly associate with any of the other objects (plane,
11  * crtc, HDMI encoder).
12  */
13 
14 #include <linux/clk.h>
15 #include <linux/sort.h>
16 
17 #include <drm/drm_atomic.h>
18 #include <drm/drm_atomic_helper.h>
19 #include <drm/drm_crtc.h>
20 #include <drm/drm_fourcc.h>
21 #include <drm/drm_gem_framebuffer_helper.h>
22 #include <drm/drm_probe_helper.h>
23 #include <drm/drm_vblank.h>
24 
25 #include "vc4_drv.h"
26 #include "vc4_regs.h"
27 
/*
 * Device-global state for the colour transformation matrix (CTM).
 *
 * The CTM is a single shared resource, so its configuration is tracked
 * through a DRM private object (vc4->ctm_manager) rather than per-CRTC
 * state.
 */
struct vc4_ctm_state {
	struct drm_private_state base;
	/* Matrix supplied by userland; NULL when no CTM is set. */
	struct drm_color_ctm *ctm;
	/* 1-based HVS FIFO the CTM applies to; 0 disables the CTM. */
	int fifo;
};

#define to_vc4_ctm_state(_state)				\
	container_of_const(_state, struct vc4_ctm_state, base)
36 
/*
 * Device-global state aggregating the load generated by all active
 * planes, used by vc4_load_tracker_atomic_check() to reject
 * configurations the hardware cannot sustain.
 */
struct vc4_load_tracker_state {
	struct drm_private_state base;
	/* Summed HVS cycles consumed per second (checked against 240M). */
	u64 hvs_load;
	/* Summed memory-bus bandwidth in bytes/sec (checked against 1.5G). */
	u64 membus_load;
};

#define to_vc4_load_tracker_state(_state)				\
	container_of_const(_state, struct vc4_load_tracker_state, base)
45 
vc4_get_ctm_state(struct drm_atomic_state * state,struct drm_private_obj * manager)46 static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
47 					       struct drm_private_obj *manager)
48 {
49 	struct drm_device *dev = state->dev;
50 	struct vc4_dev *vc4 = to_vc4_dev(dev);
51 	struct drm_private_state *priv_state;
52 	int ret;
53 
54 	ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
55 	if (ret)
56 		return ERR_PTR(ret);
57 
58 	priv_state = drm_atomic_get_private_obj_state(state, manager);
59 	if (IS_ERR(priv_state))
60 		return ERR_CAST(priv_state);
61 
62 	return to_vc4_ctm_state(priv_state);
63 }
64 
65 static struct drm_private_state *
vc4_ctm_duplicate_state(struct drm_private_obj * obj)66 vc4_ctm_duplicate_state(struct drm_private_obj *obj)
67 {
68 	struct vc4_ctm_state *state;
69 
70 	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
71 	if (!state)
72 		return NULL;
73 
74 	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
75 
76 	return &state->base;
77 }
78 
/* Free a CTM state created by vc4_ctm_duplicate_state(). */
static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	kfree(to_vc4_ctm_state(state));
}
86 
/* DRM private-object hooks for the CTM global state. */
static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
	.atomic_duplicate_state = vc4_ctm_duplicate_state,
	.atomic_destroy_state = vc4_ctm_destroy_state,
};
91 
vc4_ctm_obj_fini(struct drm_device * dev,void * unused)92 static void vc4_ctm_obj_fini(struct drm_device *dev, void *unused)
93 {
94 	struct vc4_dev *vc4 = to_vc4_dev(dev);
95 
96 	drm_atomic_private_obj_fini(&vc4->ctm_manager);
97 }
98 
vc4_ctm_obj_init(struct vc4_dev * vc4)99 static int vc4_ctm_obj_init(struct vc4_dev *vc4)
100 {
101 	struct vc4_ctm_state *ctm_state;
102 
103 	drm_modeset_lock_init(&vc4->ctm_state_lock);
104 
105 	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
106 	if (!ctm_state)
107 		return -ENOMEM;
108 
109 	drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base,
110 				    &vc4_ctm_state_funcs);
111 
112 	return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL);
113 }
114 
115 /* Converts a DRM S31.32 value to the HW S0.9 format. */
vc4_ctm_s31_32_to_s0_9(u64 in)116 static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
117 {
118 	u16 r;
119 
120 	/* Sign bit. */
121 	r = in & BIT_ULL(63) ? BIT(9) : 0;
122 
123 	if ((in & GENMASK_ULL(62, 32)) > 0) {
124 		/* We have zero integer bits so we can only saturate here. */
125 		r |= GENMASK(8, 0);
126 	} else {
127 		/* Otherwise take the 9 most important fractional bits. */
128 		r |= (in >> 23) & GENMASK(8, 0);
129 	}
130 
131 	return r;
132 }
133 
/*
 * Program the HVS OLED coefficient/offset registers from the CTM state
 * stored in ctm_manager. Writing 0 to SCALER_OLEDOFFS_DISPFIFO disables
 * the matrix, so the coefficients are only programmed when a FIFO is
 * selected.
 */
static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
	struct vc4_hvs *hvs = vc4->hvs;
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
	struct drm_color_ctm *ctm = ctm_state->ctm;

	/* These registers are only expected on VC4/VC5 generations. */
	WARN_ON_ONCE(vc4->gen > VC4_GEN_5);

	if (ctm_state->fifo) {
		/*
		 * ctm->matrix is row-major: entries 0/3/6 are the R
		 * input column, 1/4/7 the G column, 2/5/8 the B column,
		 * each converted down to the HW's S0.9 format.
		 */
		HVS_WRITE(SCALER_OLEDCOEF2,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
					SCALER_OLEDCOEF2_R_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
					SCALER_OLEDCOEF2_R_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
					SCALER_OLEDCOEF2_R_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF1,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
					SCALER_OLEDCOEF1_G_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
					SCALER_OLEDCOEF1_G_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
					SCALER_OLEDCOEF1_G_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF0,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
					SCALER_OLEDCOEF0_B_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
					SCALER_OLEDCOEF0_B_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
					SCALER_OLEDCOEF0_B_TO_B));
	}

	/* Select the FIFO (1-based) the CTM applies to, or 0 to disable. */
	HVS_WRITE(SCALER_OLEDOFFS,
		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}
170 
171 struct vc4_hvs_state *
vc4_hvs_get_new_global_state(const struct drm_atomic_state * state)172 vc4_hvs_get_new_global_state(const struct drm_atomic_state *state)
173 {
174 	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
175 	struct drm_private_state *priv_state;
176 
177 	priv_state = drm_atomic_get_new_private_obj_state(state, &vc4->hvs_channels);
178 	if (!priv_state)
179 		return ERR_PTR(-EINVAL);
180 
181 	return to_vc4_hvs_state(priv_state);
182 }
183 
184 struct vc4_hvs_state *
vc4_hvs_get_old_global_state(const struct drm_atomic_state * state)185 vc4_hvs_get_old_global_state(const struct drm_atomic_state *state)
186 {
187 	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
188 	struct drm_private_state *priv_state;
189 
190 	priv_state = drm_atomic_get_old_private_obj_state(state, &vc4->hvs_channels);
191 	if (!priv_state)
192 		return ERR_PTR(-EINVAL);
193 
194 	return to_vc4_hvs_state(priv_state);
195 }
196 
197 struct vc4_hvs_state *
vc4_hvs_get_global_state(struct drm_atomic_state * state)198 vc4_hvs_get_global_state(struct drm_atomic_state *state)
199 {
200 	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
201 	struct drm_private_state *priv_state;
202 
203 	priv_state = drm_atomic_get_private_obj_state(state, &vc4->hvs_channels);
204 	if (IS_ERR(priv_state))
205 		return ERR_CAST(priv_state);
206 
207 	return to_vc4_hvs_state(priv_state);
208 }
209 
/*
 * First-generation (VC4_GEN_4) PixelValve muxing: only the DSP3 output
 * is muxable, so for each active CRTC assigned to channel 2 we either
 * route DSP3 to FIFO2 or disable it when the transposer owns that FIFO.
 */
static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned int i;

	WARN_ON_ONCE(vc4->gen != VC4_GEN_4);

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		u32 dispctrl;
		u32 dsp3_mux;

		if (!crtc_state->active)
			continue;

		/* Only channel 2 is involved in the DSP3 routing. */
		if (vc4_state->assigned_channel != 2)
			continue;

		/*
		 * SCALER_DISPCTRL_DSP3 = X, where X < 2 means 'connect DSP3 to
		 * FIFO X'.
		 * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'.
		 *
		 * DSP3 is connected to FIFO2 unless the transposer is
		 * enabled. In this case, FIFO 2 is directly accessed by the
		 * TXP IP, and we need to disable the FIFO2 -> pixelvalve1
		 * route.
		 */
		if (vc4_crtc->feeds_txp)
			dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
		else
			dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);

		/* Read-modify-write: only the DSP3 mux field changes. */
		dispctrl = HVS_READ(SCALER_DISPCTRL) &
			   ~SCALER_DISPCTRL_DSP3_MUX_MASK;
		HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux);
	}
}
252 
/*
 * VC4_GEN_5 PixelValve muxing: updates the FIFO -> output routing for
 * every CRTC whose muxing was flagged as changed by
 * vc4_pv_muxing_atomic_check(). Each HVS output (2..5) has its mux
 * field in a different register; a mux value of 3 disables outputs
 * 3..5.
 */
static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned char mux;
	unsigned int i;
	u32 reg;

	WARN_ON_ONCE(vc4->gen != VC4_GEN_5);

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
		unsigned int channel = vc4_state->assigned_channel;

		/* Skip CRTCs whose FIFO assignment didn't change. */
		if (!vc4_state->update_muxing)
			continue;

		switch (vc4_crtc->data->hvs_output) {
		case 2:
			/*
			 * Sanity check: DSP3 must not already be routed to
			 * the channel we're about to hand to DSP2.
			 */
			drm_WARN_ON(&vc4->base,
				    VC4_GET_FIELD(HVS_READ(SCALER_DISPCTRL),
						  SCALER_DISPCTRL_DSP3_MUX) == channel);

			mux = (channel == 2) ? 0 : 1;
			reg = HVS_READ(SCALER_DISPECTRL);
			HVS_WRITE(SCALER_DISPECTRL,
				  (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPECTRL_DSP2_MUX));
			break;

		case 3:
			/* mux = 3 disables the output (see GEN_4 comment). */
			if (channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = channel;

			reg = HVS_READ(SCALER_DISPCTRL);
			HVS_WRITE(SCALER_DISPCTRL,
				  (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPCTRL_DSP3_MUX));
			break;

		case 4:
			if (channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = channel;

			reg = HVS_READ(SCALER_DISPEOLN);
			HVS_WRITE(SCALER_DISPEOLN,
				  (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPEOLN_DSP4_MUX));

			break;

		case 5:
			if (channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = channel;

			reg = HVS_READ(SCALER_DISPDITHER);
			HVS_WRITE(SCALER_DISPDITHER,
				  (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPDITHER_DSP5_MUX));
			break;

		default:
			/* Outputs 0/1 have fixed routing: nothing to do. */
			break;
		}
	}
}
328 
/*
 * VC4_GEN_6 PixelValve muxing: only channel 1 is muxable here, between
 * HDMI1 (target 0) and TXP1 (target 2), through the DSP1_TARGET field
 * of SCALER6_CONTROL.
 */
static void vc6_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned int i;

	WARN_ON_ONCE(vc4->gen != VC4_GEN_6_C && vc4->gen != VC4_GEN_6_D);

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		struct vc4_encoder *vc4_encoder;
		struct drm_encoder *encoder;
		unsigned char mux;
		u32 reg;

		/* Skip CRTCs whose FIFO assignment didn't change. */
		if (!vc4_state->update_muxing)
			continue;

		/* Only channel 1 has a configurable output target. */
		if (vc4_state->assigned_channel != 1)
			continue;

		/* Pick the mux target from the encoder driving this CRTC. */
		encoder = vc4_get_crtc_encoder(crtc, crtc_state);
		vc4_encoder = to_vc4_encoder(encoder);
		switch (vc4_encoder->type) {
		case VC4_ENCODER_TYPE_HDMI1:
			mux = 0;
			break;

		case VC4_ENCODER_TYPE_TXP1:
			mux = 2;
			break;

		default:
			drm_err(&vc4->base, "Unhandled encoder type for PV muxing %d",
				vc4_encoder->type);
			mux = 0;
			break;
		}

		/* Read-modify-write of the DSP1 target field only. */
		reg = HVS_READ(SCALER6_CONTROL);
		HVS_WRITE(SCALER6_CONTROL,
			  (reg & ~SCALER6_CONTROL_DSP1_TARGET_MASK) |
			  VC4_SET_FIELD(mux, SCALER6_CONTROL_DSP1_TARGET));
	}
}
376 
/*
 * Driver commit tail: serializes against the previous commit on each
 * HVS FIFO, temporarily raises the core/display clocks on VC4_GEN_5,
 * programs CTM and per-generation PV muxing, then runs the standard
 * atomic-helper commit sequence before settling the clocks back to the
 * steady-state rate.
 */
static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct vc4_hvs_state *new_hvs_state;
	struct vc4_hvs_state *old_hvs_state;
	unsigned int channel;

	old_hvs_state = vc4_hvs_get_old_global_state(state);
	if (WARN_ON(IS_ERR(old_hvs_state)))
		return;

	new_hvs_state = vc4_hvs_get_new_global_state(state);
	if (WARN_ON(IS_ERR(new_hvs_state)))
		return;

	if (vc4->gen < VC4_GEN_6_C) {
		struct drm_crtc_state *new_crtc_state;
		struct drm_crtc *crtc;
		int i;

		/*
		 * Mask the underrun interrupt on every channel taking part
		 * in this commit (pre-GEN_6 hardware only).
		 */
		for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
			struct vc4_crtc_state *vc4_crtc_state;

			if (!new_crtc_state->commit)
				continue;

			vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
			vc4_hvs_mask_underrun(hvs, vc4_crtc_state->assigned_channel);
		}
	}

	/*
	 * Wait for any previous commit still pending on an in-use FIFO
	 * and drop the reference taken by vc4_atomic_commit_setup().
	 */
	for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
		struct drm_crtc_commit *commit;
		int ret;

		if (!old_hvs_state->fifo_state[channel].in_use)
			continue;

		commit = old_hvs_state->fifo_state[channel].pending_commit;
		if (!commit)
			continue;

		ret = drm_crtc_commit_wait(commit);
		if (ret)
			drm_err(dev, "Timed out waiting for commit\n");

		drm_crtc_commit_put(commit);
		old_hvs_state->fifo_state[channel].pending_commit = NULL;
	}

	if (vc4->gen == VC4_GEN_5) {
		/*
		 * Run at the higher of the old/new rates during the
		 * transition, floored at 500 MHz and capped at the HVS
		 * maximum.
		 */
		unsigned long state_rate = max(old_hvs_state->core_clock_rate,
					       new_hvs_state->core_clock_rate);
		unsigned long core_rate = clamp_t(unsigned long, state_rate,
						  500000000, hvs->max_core_rate);

		drm_dbg(dev, "Raising the core clock at %lu Hz\n", core_rate);

		/*
		 * Do a temporary request on the core clock during the
		 * modeset.
		 */
		WARN_ON(clk_set_min_rate(hvs->core_clk, core_rate));
		WARN_ON(clk_set_min_rate(hvs->disp_clk, core_rate));
	}

	drm_atomic_helper_commit_modeset_disables(dev, state);

	/* The CTM block only exists on VC4/VC5 generations. */
	if (vc4->gen <= VC4_GEN_5)
		vc4_ctm_commit(vc4, state);

	/* Apply the generation-specific FIFO/output muxing. */
	switch (vc4->gen) {
	case VC4_GEN_4:
		vc4_hvs_pv_muxing_commit(vc4, state);
		break;

	case VC4_GEN_5:
		vc5_hvs_pv_muxing_commit(vc4, state);
		break;

	case VC4_GEN_6_C:
	case VC4_GEN_6_D:
		vc6_hvs_pv_muxing_commit(vc4, state);
		break;

	default:
		drm_err(dev, "Unknown VC4 generation: %d", vc4->gen);
		break;
	}

	drm_atomic_helper_commit_planes(dev, state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	drm_atomic_helper_fake_vblank(state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	/* Modeset done: settle the clocks to the new steady-state rate. */
	if (vc4->gen == VC4_GEN_5) {
		unsigned long core_rate = min_t(unsigned long,
						hvs->max_core_rate,
						new_hvs_state->core_clock_rate);

		drm_dbg(dev, "Running the core clock at %lu Hz\n", core_rate);

		/*
		 * Request a clock rate based on the current HVS
		 * requirements.
		 */
		WARN_ON(clk_set_min_rate(hvs->core_clk, core_rate));
		WARN_ON(clk_set_min_rate(hvs->disp_clk, core_rate));

		drm_dbg(dev, "Core clock actual rate: %lu Hz\n",
			clk_get_rate(hvs->core_clk));
	}
}
500 
vc4_atomic_commit_setup(struct drm_atomic_state * state)501 static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
502 {
503 	struct drm_crtc_state *crtc_state;
504 	struct vc4_hvs_state *hvs_state;
505 	struct drm_crtc *crtc;
506 	unsigned int i;
507 
508 	hvs_state = vc4_hvs_get_new_global_state(state);
509 	if (WARN_ON(IS_ERR(hvs_state)))
510 		return PTR_ERR(hvs_state);
511 
512 	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
513 		struct vc4_crtc_state *vc4_crtc_state =
514 			to_vc4_crtc_state(crtc_state);
515 		unsigned int channel =
516 			vc4_crtc_state->assigned_channel;
517 
518 		if (channel == VC4_HVS_CHANNEL_DISABLED)
519 			continue;
520 
521 		if (!hvs_state->fifo_state[channel].in_use)
522 			continue;
523 
524 		hvs_state->fifo_state[channel].pending_commit =
525 			drm_crtc_commit_get(crtc_state->commit);
526 	}
527 
528 	return 0;
529 }
530 
vc4_fb_create(struct drm_device * dev,struct drm_file * file_priv,const struct drm_mode_fb_cmd2 * mode_cmd)531 static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
532 					     struct drm_file *file_priv,
533 					     const struct drm_mode_fb_cmd2 *mode_cmd)
534 {
535 	struct vc4_dev *vc4 = to_vc4_dev(dev);
536 	struct drm_mode_fb_cmd2 mode_cmd_local;
537 
538 	if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
539 		return ERR_PTR(-ENODEV);
540 
541 	/* If the user didn't specify a modifier, use the
542 	 * vc4_set_tiling_ioctl() state for the BO.
543 	 */
544 	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
545 		struct drm_gem_object *gem_obj;
546 		struct vc4_bo *bo;
547 
548 		gem_obj = drm_gem_object_lookup(file_priv,
549 						mode_cmd->handles[0]);
550 		if (!gem_obj) {
551 			DRM_DEBUG("Failed to look up GEM BO %d\n",
552 				  mode_cmd->handles[0]);
553 			return ERR_PTR(-ENOENT);
554 		}
555 		bo = to_vc4_bo(gem_obj);
556 
557 		mode_cmd_local = *mode_cmd;
558 
559 		if (bo->t_format) {
560 			mode_cmd_local.modifier[0] =
561 				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
562 		} else {
563 			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
564 		}
565 
566 		drm_gem_object_put(gem_obj);
567 
568 		mode_cmd = &mode_cmd_local;
569 	}
570 
571 	return drm_gem_fb_create(dev, file_priv, mode_cmd);
572 }
573 
574 /* Our CTM has some peculiar limitations: we can only enable it for one CRTC
575  * at a time and the HW only supports S0.9 scalars. To account for the latter,
576  * we don't allow userland to set a CTM that we have no hope of approximating.
577  */
578 static int
vc4_ctm_atomic_check(struct drm_device * dev,struct drm_atomic_state * state)579 vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
580 {
581 	struct vc4_dev *vc4 = to_vc4_dev(dev);
582 	struct vc4_ctm_state *ctm_state = NULL;
583 	struct drm_crtc *crtc;
584 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
585 	struct drm_color_ctm *ctm;
586 	int i;
587 
588 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
589 		/* CTM is being disabled. */
590 		if (!new_crtc_state->ctm && old_crtc_state->ctm) {
591 			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
592 			if (IS_ERR(ctm_state))
593 				return PTR_ERR(ctm_state);
594 			ctm_state->fifo = 0;
595 		}
596 	}
597 
598 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
599 		if (new_crtc_state->ctm == old_crtc_state->ctm)
600 			continue;
601 
602 		if (!ctm_state) {
603 			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
604 			if (IS_ERR(ctm_state))
605 				return PTR_ERR(ctm_state);
606 		}
607 
608 		/* CTM is being enabled or the matrix changed. */
609 		if (new_crtc_state->ctm) {
610 			struct vc4_crtc_state *vc4_crtc_state =
611 				to_vc4_crtc_state(new_crtc_state);
612 
613 			/* fifo is 1-based since 0 disables CTM. */
614 			int fifo = vc4_crtc_state->assigned_channel + 1;
615 
616 			/* Check userland isn't trying to turn on CTM for more
617 			 * than one CRTC at a time.
618 			 */
619 			if (ctm_state->fifo && ctm_state->fifo != fifo) {
620 				DRM_DEBUG_DRIVER("Too many CTM configured\n");
621 				return -EINVAL;
622 			}
623 
624 			/* Check we can approximate the specified CTM.
625 			 * We disallow scalars |c| > 1.0 since the HW has
626 			 * no integer bits.
627 			 */
628 			ctm = new_crtc_state->ctm->data;
629 			for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
630 				u64 val = ctm->matrix[i];
631 
632 				val &= ~BIT_ULL(63);
633 				if (val > BIT_ULL(32))
634 					return -EINVAL;
635 			}
636 
637 			ctm_state->fifo = fifo;
638 			ctm_state->ctm = ctm;
639 		}
640 	}
641 
642 	return 0;
643 }
644 
/*
 * Update the global load tracker with the delta introduced by this
 * commit (subtract each old plane's contribution, add each new one's),
 * then reject the configuration with -ENOSPC if the resulting load
 * exceeds the memory-bus or HVS budget. The check itself is skipped
 * when load tracking is disabled, but the bookkeeping always runs.
 */
static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct vc4_load_tracker_state *load_state;
	struct drm_private_state *priv_state;
	struct drm_plane *plane;
	int i;

	priv_state = drm_atomic_get_private_obj_state(state,
						      &vc4->load_tracker);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	load_state = to_vc4_load_tracker_state(priv_state);
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i) {
		struct vc4_plane_state *vc4_plane_state;

		/* Remove the contribution of the outgoing plane state. */
		if (old_plane_state->fb && old_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(old_plane_state);
			load_state->membus_load -= vc4_plane_state->membus_load;
			load_state->hvs_load -= vc4_plane_state->hvs_load;
		}

		/* Add the contribution of the incoming plane state. */
		if (new_plane_state->fb && new_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(new_plane_state);
			load_state->membus_load += vc4_plane_state->membus_load;
			load_state->hvs_load += vc4_plane_state->hvs_load;
		}
	}

	/* Don't check the load when the tracker is disabled. */
	if (!vc4->load_tracker_enabled)
		return 0;

	/* The absolute limit is 2Gbyte/sec, but let's take a margin to let
	 * the system work when other blocks are accessing the memory.
	 */
	if (load_state->membus_load > SZ_1G + SZ_512M)
		return -ENOSPC;

	/* HVS clock is supposed to run @ 250Mhz, let's take a margin and
	 * consider the maximum number of cycles is 240M.
	 */
	if (load_state->hvs_load > 240000000ULL)
		return -ENOSPC;

	return 0;
}
695 
696 static struct drm_private_state *
vc4_load_tracker_duplicate_state(struct drm_private_obj * obj)697 vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
698 {
699 	struct vc4_load_tracker_state *state;
700 
701 	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
702 	if (!state)
703 		return NULL;
704 
705 	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
706 
707 	return &state->base;
708 }
709 
/* Free a duplicated load tracker state. */
static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	kfree(to_vc4_load_tracker_state(state));
}
718 
/* DRM private-object hooks for the load tracker global state. */
static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
	.atomic_duplicate_state = vc4_load_tracker_duplicate_state,
	.atomic_destroy_state = vc4_load_tracker_destroy_state,
};
723 
vc4_load_tracker_obj_fini(struct drm_device * dev,void * unused)724 static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused)
725 {
726 	struct vc4_dev *vc4 = to_vc4_dev(dev);
727 
728 	drm_atomic_private_obj_fini(&vc4->load_tracker);
729 }
730 
vc4_load_tracker_obj_init(struct vc4_dev * vc4)731 static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
732 {
733 	struct vc4_load_tracker_state *load_state;
734 
735 	load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
736 	if (!load_state)
737 		return -ENOMEM;
738 
739 	drm_atomic_private_obj_init(&vc4->base, &vc4->load_tracker,
740 				    &load_state->base,
741 				    &vc4_load_tracker_state_funcs);
742 
743 	return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL);
744 }
745 
746 static struct drm_private_state *
vc4_hvs_channels_duplicate_state(struct drm_private_obj * obj)747 vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
748 {
749 	struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
750 	struct vc4_hvs_state *state;
751 	unsigned int i;
752 
753 	state = kzalloc(sizeof(*state), GFP_KERNEL);
754 	if (!state)
755 		return NULL;
756 
757 	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
758 
759 	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
760 		state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
761 		state->fifo_state[i].fifo_load = old_state->fifo_state[i].fifo_load;
762 	}
763 
764 	state->core_clock_rate = old_state->core_clock_rate;
765 
766 	return &state->base;
767 }
768 
vc4_hvs_channels_destroy_state(struct drm_private_obj * obj,struct drm_private_state * state)769 static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
770 					   struct drm_private_state *state)
771 {
772 	struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
773 	unsigned int i;
774 
775 	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
776 		if (!hvs_state->fifo_state[i].pending_commit)
777 			continue;
778 
779 		drm_crtc_commit_put(hvs_state->fifo_state[i].pending_commit);
780 	}
781 
782 	kfree(hvs_state);
783 }
784 
vc4_hvs_channels_print_state(struct drm_printer * p,const struct drm_private_state * state)785 static void vc4_hvs_channels_print_state(struct drm_printer *p,
786 					 const struct drm_private_state *state)
787 {
788 	const struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
789 	unsigned int i;
790 
791 	drm_printf(p, "HVS State\n");
792 	drm_printf(p, "\tCore Clock Rate: %lu\n", hvs_state->core_clock_rate);
793 
794 	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
795 		drm_printf(p, "\tChannel %d\n", i);
796 		drm_printf(p, "\t\tin use=%d\n", hvs_state->fifo_state[i].in_use);
797 		drm_printf(p, "\t\tload=%lu\n", hvs_state->fifo_state[i].fifo_load);
798 	}
799 }
800 
/* DRM private-object hooks for the HVS channels global state. */
static const struct drm_private_state_funcs vc4_hvs_state_funcs = {
	.atomic_duplicate_state = vc4_hvs_channels_duplicate_state,
	.atomic_destroy_state = vc4_hvs_channels_destroy_state,
	.atomic_print_state = vc4_hvs_channels_print_state,
};
806 
vc4_hvs_channels_obj_fini(struct drm_device * dev,void * unused)807 static void vc4_hvs_channels_obj_fini(struct drm_device *dev, void *unused)
808 {
809 	struct vc4_dev *vc4 = to_vc4_dev(dev);
810 
811 	drm_atomic_private_obj_fini(&vc4->hvs_channels);
812 }
813 
vc4_hvs_channels_obj_init(struct vc4_dev * vc4)814 static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
815 {
816 	struct vc4_hvs_state *state;
817 
818 	state = kzalloc(sizeof(*state), GFP_KERNEL);
819 	if (!state)
820 		return -ENOMEM;
821 
822 	drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
823 				    &state->base,
824 				    &vc4_hvs_state_funcs);
825 
826 	return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL);
827 }
828 
cmp_vc4_crtc_hvs_output(const void * a,const void * b)829 static int cmp_vc4_crtc_hvs_output(const void *a, const void *b)
830 {
831 	const struct vc4_crtc *crtc_a =
832 		to_vc4_crtc(*(const struct drm_crtc **)a);
833 	const struct vc4_crtc_data *data_a =
834 		vc4_crtc_to_vc4_crtc_data(crtc_a);
835 	const struct vc4_crtc *crtc_b =
836 		to_vc4_crtc(*(const struct drm_crtc **)b);
837 	const struct vc4_crtc_data *data_b =
838 		vc4_crtc_to_vc4_crtc_data(crtc_b);
839 
840 	return data_a->hvs_output - data_b->hvs_output;
841 }
842 
843 /*
844  * The BCM2711 HVS has up to 7 outputs connected to the pixelvalves and
845  * the TXP (and therefore all the CRTCs found on that platform).
846  *
847  * The naive (and our initial) implementation would just iterate over
848  * all the active CRTCs, try to find a suitable FIFO, and then remove it
849  * from the pool of available FIFOs. However, there are a few corner
850  * cases that need to be considered:
851  *
852  * - When running in a dual-display setup (so with two CRTCs involved),
853  *   we can update the state of a single CRTC (for example by changing
854  *   its mode using xrandr under X11) without affecting the other. In
855  *   this case, the other CRTC wouldn't be in the state at all, so we
856  *   need to consider all the running CRTCs in the DRM device to assign
857  *   a FIFO, not just the one in the state.
858  *
859  * - To fix the above, we can't use drm_atomic_get_crtc_state on all
860  *   enabled CRTCs to pull their CRTC state into the global state, since
861  *   a page flip would start considering their vblank to complete. Since
862  *   we don't have a guarantee that they are actually active, that
863  *   vblank might never happen, and shouldn't even be considered if we
864  *   want to do a page flip on a single CRTC. That can be tested by
865  *   doing a modetest -v first on HDMI1 and then on HDMI0.
866  *
867  * - Since we need the pixelvalve to be disabled and enabled back when
868  *   the FIFO is changed, we should keep the FIFO assigned for as long
869  *   as the CRTC is enabled, only considering it free again once that
870  *   CRTC has been disabled. This can be tested by booting X11 on a
871  *   single display, and changing the resolution down and then back up.
872  */
/*
 * Assign an HVS FIFO (channel) to every CRTC being enabled in this
 * state and release the channel of every CRTC being disabled. See the
 * comment block above for why the assignment walks ALL the device's
 * CRTCs, sorted by hvs_output, rather than just the ones in the state.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -EINVAL when
 * a CRTC being enabled has no compatible free channel left.
 */
static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
				      struct drm_atomic_state *state)
{
	struct vc4_hvs_state *hvs_new_state;
	struct drm_crtc **sorted_crtcs;
	struct drm_crtc *crtc;
	unsigned int unassigned_channels = 0;
	unsigned int i;
	int ret;

	hvs_new_state = vc4_hvs_get_global_state(state);
	if (IS_ERR(hvs_new_state))
		return PTR_ERR(hvs_new_state);

	/* Build a bitmask of the channels currently free. */
	for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
		if (!hvs_new_state->fifo_state[i].in_use)
			unassigned_channels |= BIT(i);

	/*
	 * The problem we have to solve here is that we have up to 7
	 * encoders, connected to up to 6 CRTCs.
	 *
	 * Those CRTCs, depending on the instance, can be routed to 1, 2
	 * or 3 HVS FIFOs, and we need to set the muxing between FIFOs and
	 * outputs in the HVS accordingly.
	 *
	 * It would be pretty hard to come up with an algorithm that
	 * would generically solve this. However, the current routing
	 * trees we support allow us to simplify a bit the problem.
	 *
	 * Indeed, with the current supported layouts, if we try to
	 * assign in the ascending crtc index order the FIFOs, we can't
	 * fall into the situation where an earlier CRTC that had
	 * multiple routes is assigned one that was the only option for
	 * a later CRTC.
	 *
	 * If the layout changes and doesn't give us that in the future,
	 * we will need to have something smarter, but it works so far.
	 */
	sorted_crtcs = kmalloc_array(dev->num_crtcs, sizeof(*sorted_crtcs), GFP_KERNEL);
	if (!sorted_crtcs)
		return -ENOMEM;

	i = 0;
	drm_for_each_crtc(crtc, dev)
		sorted_crtcs[i++] = crtc;

	/* Process CRTCs in ascending hvs_output order (see above). */
	sort(sorted_crtcs, i, sizeof(*sorted_crtcs), cmp_vc4_crtc_hvs_output, NULL);

	for (i = 0; i < dev->num_crtcs; i++) {
		struct vc4_crtc_state *old_vc4_crtc_state, *new_vc4_crtc_state;
		struct drm_crtc_state *old_crtc_state, *new_crtc_state;
		struct vc4_crtc *vc4_crtc;
		unsigned int matching_channels;
		unsigned int channel;

		crtc = sorted_crtcs[i];
		if (!crtc)
			continue;
		vc4_crtc = to_vc4_crtc(crtc);

		/* CRTCs not part of this atomic state keep their channel. */
		old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
		if (!old_crtc_state)
			continue;
		old_vc4_crtc_state = to_vc4_crtc_state(old_crtc_state);

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state)
			continue;
		new_vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);

		drm_dbg(dev, "%s: Trying to find a channel.\n", crtc->name);

		/* Nothing to do here, let's skip it */
		if (old_crtc_state->enable == new_crtc_state->enable) {
			if (new_crtc_state->enable)
				drm_dbg(dev, "%s: Already enabled, reusing channel %d.\n",
					crtc->name, new_vc4_crtc_state->assigned_channel);
			else
				drm_dbg(dev, "%s: Disabled, ignoring.\n", crtc->name);

			continue;
		}

		/* Muxing will need to be modified, mark it as such */
		new_vc4_crtc_state->update_muxing = true;

		/* If we're disabling our CRTC, we put back our channel */
		if (!new_crtc_state->enable) {
			channel = old_vc4_crtc_state->assigned_channel;

			drm_dbg(dev, "%s: Disabling, Freeing channel %d\n",
				crtc->name, channel);

			hvs_new_state->fifo_state[channel].in_use = false;
			new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
			continue;
		}

		/* Pick the lowest free channel this CRTC can be routed to. */
		matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
		if (!matching_channels) {
			ret = -EINVAL;
			goto err_free_crtc_array;
		}

		channel = ffs(matching_channels) - 1;

		drm_dbg(dev, "Assigned HVS channel %d to CRTC %s\n", channel, crtc->name);
		new_vc4_crtc_state->assigned_channel = channel;
		unassigned_channels &= ~BIT(channel);
		hvs_new_state->fifo_state[channel].in_use = true;
	}

	kfree(sorted_crtcs);
	return 0;

err_free_crtc_array:
	kfree(sorted_crtcs);
	return ret;
}
993 
994 static int
vc4_core_clock_atomic_check(struct drm_atomic_state * state)995 vc4_core_clock_atomic_check(struct drm_atomic_state *state)
996 {
997 	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
998 	struct drm_private_state *priv_state;
999 	struct vc4_hvs_state *hvs_new_state;
1000 	struct vc4_load_tracker_state *load_state;
1001 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
1002 	struct drm_crtc *crtc;
1003 	unsigned int num_outputs;
1004 	unsigned long pixel_rate;
1005 	unsigned long cob_rate;
1006 	unsigned int i;
1007 
1008 	priv_state = drm_atomic_get_private_obj_state(state,
1009 						      &vc4->load_tracker);
1010 	if (IS_ERR(priv_state))
1011 		return PTR_ERR(priv_state);
1012 
1013 	load_state = to_vc4_load_tracker_state(priv_state);
1014 
1015 	hvs_new_state = vc4_hvs_get_global_state(state);
1016 	if (IS_ERR(hvs_new_state))
1017 		return PTR_ERR(hvs_new_state);
1018 
1019 	for_each_oldnew_crtc_in_state(state, crtc,
1020 				      old_crtc_state,
1021 				      new_crtc_state,
1022 				      i) {
1023 		if (old_crtc_state->active) {
1024 			struct vc4_crtc_state *old_vc4_state =
1025 				to_vc4_crtc_state(old_crtc_state);
1026 			unsigned int channel = old_vc4_state->assigned_channel;
1027 
1028 			hvs_new_state->fifo_state[channel].fifo_load = 0;
1029 		}
1030 
1031 		if (new_crtc_state->active) {
1032 			struct vc4_crtc_state *new_vc4_state =
1033 				to_vc4_crtc_state(new_crtc_state);
1034 			unsigned int channel = new_vc4_state->assigned_channel;
1035 
1036 			hvs_new_state->fifo_state[channel].fifo_load =
1037 				new_vc4_state->hvs_load;
1038 		}
1039 	}
1040 
1041 	cob_rate = 0;
1042 	num_outputs = 0;
1043 	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
1044 		if (!hvs_new_state->fifo_state[i].in_use)
1045 			continue;
1046 
1047 		num_outputs++;
1048 		cob_rate = max_t(unsigned long,
1049 				 hvs_new_state->fifo_state[i].fifo_load,
1050 				 cob_rate);
1051 	}
1052 
1053 	pixel_rate = load_state->hvs_load;
1054 	if (num_outputs > 1) {
1055 		pixel_rate = (pixel_rate * 40) / 100;
1056 	} else {
1057 		pixel_rate = (pixel_rate * 60) / 100;
1058 	}
1059 
1060 	hvs_new_state->core_clock_rate = max(cob_rate, pixel_rate);
1061 
1062 	return 0;
1063 }
1064 
1065 
/*
 * Top-level atomic check for the driver, wired into
 * drm_mode_config_funcs.atomic_check for both the vc4 and vc5 tables.
 *
 * The order of the sub-checks is significant: PV muxing must run first
 * so that HVS channels are assigned before anything depends on them,
 * and the load tracker must run before the core clock check, which
 * consumes the per-CRTC hvs_load it computes.
 *
 * Returns 0 on success or a negative error code from the first check
 * that fails.
 */
static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	int ret;

	ret = vc4_pv_muxing_atomic_check(dev, state);
	if (ret)
		return ret;

	/*
	 * NOTE(review): this is the only check tested with "< 0" rather
	 * than plain non-zero; kept as-is in case a positive return is
	 * meaningful — confirm against vc4_ctm_atomic_check().
	 */
	ret = vc4_ctm_atomic_check(dev, state);
	if (ret < 0)
		return ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	ret = vc4_load_tracker_atomic_check(state);
	if (ret)
		return ret;

	return vc4_core_clock_atomic_check(state);
}
1089 
1090 static struct drm_mode_config_helper_funcs vc4_mode_config_helpers = {
1091 	.atomic_commit_setup	= vc4_atomic_commit_setup,
1092 	.atomic_commit_tail	= vc4_atomic_commit_tail,
1093 };
1094 
/*
 * Mode config funcs for VC4_GEN_4 hardware, which uses the driver's
 * own fb_create hook (vc4_fb_create) rather than the generic GEM
 * helper used by later generations.
 */
static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
	.fb_create = vc4_fb_create,
};
1100 
/*
 * Mode config funcs for generations after VC4_GEN_4; identical to
 * vc4_mode_funcs except framebuffers are created through the generic
 * drm_gem_fb_create helper.
 */
static const struct drm_mode_config_funcs vc5_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
	.fb_create = drm_gem_fb_create,
};
1106 
vc4_kms_load(struct drm_device * dev)1107 int vc4_kms_load(struct drm_device *dev)
1108 {
1109 	struct vc4_dev *vc4 = to_vc4_dev(dev);
1110 	int ret;
1111 
1112 	/*
1113 	 * The limits enforced by the load tracker aren't relevant for
1114 	 * the BCM2711, but the load tracker computations are used for
1115 	 * the core clock rate calculation.
1116 	 */
1117 	if (vc4->gen == VC4_GEN_4) {
1118 		/* Start with the load tracker enabled. Can be
1119 		 * disabled through the debugfs load_tracker file.
1120 		 */
1121 		vc4->load_tracker_enabled = true;
1122 	}
1123 
1124 	/* Set support for vblank irq fast disable, before drm_vblank_init() */
1125 	dev->vblank_disable_immediate = true;
1126 
1127 	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
1128 	if (ret < 0) {
1129 		dev_err(dev->dev, "failed to initialize vblank\n");
1130 		return ret;
1131 	}
1132 
1133 	if (vc4->gen >= VC4_GEN_6_C) {
1134 		dev->mode_config.max_width = 8192;
1135 		dev->mode_config.max_height = 8192;
1136 	} else if (vc4->gen >= VC4_GEN_5) {
1137 		dev->mode_config.max_width = 7680;
1138 		dev->mode_config.max_height = 7680;
1139 	} else {
1140 		dev->mode_config.max_width = 2048;
1141 		dev->mode_config.max_height = 2048;
1142 	}
1143 
1144 	dev->mode_config.funcs = (vc4->gen > VC4_GEN_4) ? &vc5_mode_funcs : &vc4_mode_funcs;
1145 	dev->mode_config.helper_private = &vc4_mode_config_helpers;
1146 	dev->mode_config.preferred_depth = 24;
1147 	dev->mode_config.async_page_flip = true;
1148 	dev->mode_config.normalize_zpos = true;
1149 
1150 	ret = vc4_ctm_obj_init(vc4);
1151 	if (ret)
1152 		return ret;
1153 
1154 	ret = vc4_load_tracker_obj_init(vc4);
1155 	if (ret)
1156 		return ret;
1157 
1158 	ret = vc4_hvs_channels_obj_init(vc4);
1159 	if (ret)
1160 		return ret;
1161 
1162 	drm_mode_config_reset(dev);
1163 
1164 	drm_kms_helper_poll_init(dev);
1165 
1166 	return 0;
1167 }
1168