xref: /linux/drivers/gpu/drm/vc4/vc4_kms.c (revision f6e8dc9edf963dbc99085e54f6ced6da9daa6100)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Broadcom
 */

/**
 * DOC: VC4 KMS
 *
 * This is the general code for implementing KMS mode setting that
 * doesn't clearly associate with any of the other objects (plane,
 * crtc, HDMI encoder).
 */

#include <linux/clk.h>
#include <linux/sort.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "vc4_drv.h"
#include "vc4_regs.h"

struct vc4_ctm_state {
	struct drm_private_state base;
	struct drm_color_ctm *ctm;
	int fifo;
};

#define to_vc4_ctm_state(_state)				\
	container_of_const(_state, struct vc4_ctm_state, base)

struct vc4_load_tracker_state {
	struct drm_private_state base;
	u64 hvs_load;
	u64 membus_load;
};

#define to_vc4_load_tracker_state(_state)				\
	container_of_const(_state, struct vc4_load_tracker_state, base)

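/*
 * Returns the CTM private state for this atomic commit, taking the CTM
 * modeset lock first since the private object is shared by all CRTCs and
 * is not protected by any per-CRTC lock.
 */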
static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
					       struct drm_private_obj *manager)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_private_state *priv_state;
	int ret;

	ret = drm_modeset_lock(&vc4->ctm_state_lock, state->acquire_ctx);
	if (ret)
		return ERR_PTR(ret);

	priv_state = drm_atomic_get_private_obj_state(state, manager);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_ctm_state(priv_state);
}

static struct drm_private_state *
vc4_ctm_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_ctm_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_ctm_destroy_state(struct drm_private_obj *obj,
				  struct drm_private_state *state)
{
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(state);

	kfree(ctm_state);
}

static const struct drm_private_state_funcs vc4_ctm_state_funcs = {
	.atomic_duplicate_state = vc4_ctm_duplicate_state,
	.atomic_destroy_state = vc4_ctm_destroy_state,
};

static void vc4_ctm_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->ctm_manager);
}

static int vc4_ctm_obj_init(struct vc4_dev *vc4)
{
	struct vc4_ctm_state *ctm_state;

	drm_modeset_lock_init(&vc4->ctm_state_lock);

	ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL);
	if (!ctm_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base,
				    &vc4_ctm_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL);
}

/* Converts a DRM S31.32 value to the HW S0.9 format. */
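/*
 * A few worked examples, derived from the conversion below rather than
 * from the HW documentation:
 *
 *    0.5 (0x0000000080000000) -> 0x100 (256/512)
 *   -0.5 (sign bit set)       -> 0x300
 *    1.0 (0x0000000100000000) -> 0x1ff (saturated)
 */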
static u16 vc4_ctm_s31_32_to_s0_9(u64 in)
{
	u16 r;

	/* Sign bit. */
	r = in & BIT_ULL(63) ? BIT(9) : 0;

	if ((in & GENMASK_ULL(62, 32)) > 0) {
		/* We have zero integer bits so we can only saturate here. */
		r |= GENMASK(8, 0);
	} else {
		/* Otherwise take the 9 most important fractional bits. */
		r |= (in >> 23) & GENMASK(8, 0);
	}

	return r;
}

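/*
 * Programs the OLED transform coefficients for the FIFO recorded in the
 * CTM state. A fifo value of 0 leaves the coefficient registers alone and
 * clears SCALER_OLEDOFFS_DISPFIFO, which disables the CTM entirely.
 */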
static void
vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state)
{
	struct vc4_hvs *hvs = vc4->hvs;
	struct vc4_ctm_state *ctm_state = to_vc4_ctm_state(vc4->ctm_manager.state);
	struct drm_color_ctm *ctm = ctm_state->ctm;

	WARN_ON_ONCE(vc4->gen > VC4_GEN_5);

	if (ctm_state->fifo) {
		HVS_WRITE(SCALER_OLEDCOEF2,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[0]),
					SCALER_OLEDCOEF2_R_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[3]),
					SCALER_OLEDCOEF2_R_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[6]),
					SCALER_OLEDCOEF2_R_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF1,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[1]),
					SCALER_OLEDCOEF1_G_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[4]),
					SCALER_OLEDCOEF1_G_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[7]),
					SCALER_OLEDCOEF1_G_TO_B));
		HVS_WRITE(SCALER_OLEDCOEF0,
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[2]),
					SCALER_OLEDCOEF0_B_TO_R) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[5]),
					SCALER_OLEDCOEF0_B_TO_G) |
			  VC4_SET_FIELD(vc4_ctm_s31_32_to_s0_9(ctm->matrix[8]),
					SCALER_OLEDCOEF0_B_TO_B));
	}

	HVS_WRITE(SCALER_OLEDOFFS,
		  VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO));
}

struct vc4_hvs_state *
vc4_hvs_get_new_global_state(const struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_new_private_obj_state(state, &vc4->hvs_channels);
	if (!priv_state)
		return ERR_PTR(-EINVAL);

	return to_vc4_hvs_state(priv_state);
}

struct vc4_hvs_state *
vc4_hvs_get_old_global_state(const struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_old_private_obj_state(state, &vc4->hvs_channels);
	if (!priv_state)
		return ERR_PTR(-EINVAL);

	return to_vc4_hvs_state(priv_state);
}

struct vc4_hvs_state *
vc4_hvs_get_global_state(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_private_obj_state(state, &vc4->hvs_channels);
	if (IS_ERR(priv_state))
		return ERR_CAST(priv_state);

	return to_vc4_hvs_state(priv_state);
}

static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned int i;

	WARN_ON_ONCE(vc4->gen != VC4_GEN_4);

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		u32 dispctrl;
		u32 dsp3_mux;

		if (!crtc_state->active)
			continue;

		if (vc4_state->assigned_channel != 2)
			continue;

		/*
		 * SCALER_DISPCTRL_DSP3 = X, where X < 3 means 'connect DSP3 to
		 * FIFO X'.
		 * SCALER_DISPCTRL_DSP3 = 3 means 'disable DSP 3'.
		 *
		 * DSP3 is connected to FIFO2 unless the transposer is
		 * enabled. In this case, FIFO 2 is directly accessed by the
		 * TXP IP, and we need to disable the FIFO2 -> pixelvalve1
		 * route.
		 */
		if (vc4_crtc->feeds_txp)
			dsp3_mux = VC4_SET_FIELD(3, SCALER_DISPCTRL_DSP3_MUX);
		else
			dsp3_mux = VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);

		dispctrl = HVS_READ(SCALER_DISPCTRL) &
			   ~SCALER_DISPCTRL_DSP3_MUX_MASK;
		HVS_WRITE(SCALER_DISPCTRL, dispctrl | dsp3_mux);
	}
}

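/*
 * On the BCM2711 each display output has its own mux field: DSP2 lives in
 * SCALER_DISPECTRL, DSP3 in SCALER_DISPCTRL, DSP4 in SCALER_DISPEOLN and
 * DSP5 in SCALER_DISPDITHER. For DSP3, DSP4 and DSP5 a mux value of 3
 * leaves the output disconnected, which is what gets programmed when the
 * CRTC has no assigned channel.
 */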
static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned char mux;
	unsigned int i;
	u32 reg;

	WARN_ON_ONCE(vc4->gen != VC4_GEN_5);

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
		unsigned int channel = vc4_state->assigned_channel;

		if (!vc4_state->update_muxing)
			continue;

		switch (vc4_crtc->data->hvs_output) {
		case 2:
			drm_WARN_ON(&vc4->base,
				    VC4_GET_FIELD(HVS_READ(SCALER_DISPCTRL),
						  SCALER_DISPCTRL_DSP3_MUX) == channel);

			mux = (channel == 2) ? 0 : 1;
			reg = HVS_READ(SCALER_DISPECTRL);
			HVS_WRITE(SCALER_DISPECTRL,
				  (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPECTRL_DSP2_MUX));
			break;

		case 3:
			if (channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = channel;

			reg = HVS_READ(SCALER_DISPCTRL);
			HVS_WRITE(SCALER_DISPCTRL,
				  (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPCTRL_DSP3_MUX));
			break;

		case 4:
			if (channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = channel;

			reg = HVS_READ(SCALER_DISPEOLN);
			HVS_WRITE(SCALER_DISPEOLN,
				  (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPEOLN_DSP4_MUX));

			break;

		case 5:
			if (channel == VC4_HVS_CHANNEL_DISABLED)
				mux = 3;
			else
				mux = channel;

			reg = HVS_READ(SCALER_DISPDITHER);
			HVS_WRITE(SCALER_DISPDITHER,
				  (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) |
				  VC4_SET_FIELD(mux, SCALER_DISPDITHER_DSP5_MUX));
			break;

		default:
			break;
		}
	}
}

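/*
 * On the GEN6 HVS only the DSP1 target is re-muxed here, between HDMI1
 * (mux value 0) and the second TXP (mux value 2), via SCALER6_CONTROL.
 */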
static void vc6_hvs_pv_muxing_commit(struct vc4_dev *vc4,
				     struct drm_atomic_state *state)
{
	struct vc4_hvs *hvs = vc4->hvs;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	unsigned int i;

	WARN_ON_ONCE(vc4->gen != VC4_GEN_6_C && vc4->gen != VC4_GEN_6_D);

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
		struct vc4_encoder *vc4_encoder;
		struct drm_encoder *encoder;
		unsigned char mux;
		u32 reg;

		if (!vc4_state->update_muxing)
			continue;

		if (vc4_state->assigned_channel != 1)
			continue;

		encoder = vc4_get_crtc_encoder(crtc, crtc_state);
		vc4_encoder = to_vc4_encoder(encoder);
		switch (vc4_encoder->type) {
		case VC4_ENCODER_TYPE_HDMI1:
			mux = 0;
			break;

		case VC4_ENCODER_TYPE_TXP1:
			mux = 2;
			break;

		default:
			drm_err(&vc4->base, "Unhandled encoder type for PV muxing %d",
				vc4_encoder->type);
			mux = 0;
			break;
		}

		reg = HVS_READ(SCALER6_CONTROL);
		HVS_WRITE(SCALER6_CONTROL,
			  (reg & ~SCALER6_CONTROL_DSP1_TARGET_MASK) |
			  VC4_SET_FIELD(mux, SCALER6_CONTROL_DSP1_TARGET));
	}
}

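/*
 * Custom commit_tail: wait for the previous commit pending on each HVS
 * FIFO in use, temporarily raise the core and display clocks on the
 * BCM2711 (GEN5) while the modeset is applied, run the standard atomic
 * helper sequence together with the generation-specific PV muxing
 * updates, and finally lower the clock request to what the new state
 * actually needs.
 */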
static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_hvs *hvs = vc4->hvs;
	struct vc4_hvs_state *new_hvs_state;
	struct vc4_hvs_state *old_hvs_state;
	unsigned int channel;

	old_hvs_state = vc4_hvs_get_old_global_state(state);
	if (WARN_ON(IS_ERR(old_hvs_state)))
		return;

	new_hvs_state = vc4_hvs_get_new_global_state(state);
	if (WARN_ON(IS_ERR(new_hvs_state)))
		return;

	if (vc4->gen < VC4_GEN_6_C) {
		struct drm_crtc_state *new_crtc_state;
		struct drm_crtc *crtc;
		int i;

		for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
			struct vc4_crtc_state *vc4_crtc_state;

			if (!new_crtc_state->commit)
				continue;

			vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);
			vc4_hvs_mask_underrun(hvs, vc4_crtc_state->assigned_channel);
		}
	}

	for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
		struct drm_crtc_commit *commit;
		int ret;

		if (!old_hvs_state->fifo_state[channel].in_use)
			continue;

		commit = old_hvs_state->fifo_state[channel].pending_commit;
		if (!commit)
			continue;

		ret = drm_crtc_commit_wait(commit);
		if (ret)
			drm_err(dev, "Timed out waiting for commit\n");

		drm_crtc_commit_put(commit);
		old_hvs_state->fifo_state[channel].pending_commit = NULL;
	}

	if (vc4->gen == VC4_GEN_5) {
		unsigned long state_rate = max(old_hvs_state->core_clock_rate,
					       new_hvs_state->core_clock_rate);
		unsigned long core_rate = clamp_t(unsigned long, state_rate,
						  500000000, hvs->max_core_rate);

		drm_dbg(dev, "Raising the core clock to %lu Hz\n", core_rate);

		/*
		 * Do a temporary request on the core clock during the
		 * modeset.
		 */
		WARN_ON(clk_set_min_rate(hvs->core_clk, core_rate));
		WARN_ON(clk_set_min_rate(hvs->disp_clk, core_rate));
	}

	drm_atomic_helper_commit_modeset_disables(dev, state);

	if (vc4->gen <= VC4_GEN_5)
		vc4_ctm_commit(vc4, state);

	switch (vc4->gen) {
	case VC4_GEN_4:
		vc4_hvs_pv_muxing_commit(vc4, state);
		break;

	case VC4_GEN_5:
		vc5_hvs_pv_muxing_commit(vc4, state);
		break;

	case VC4_GEN_6_C:
	case VC4_GEN_6_D:
		vc6_hvs_pv_muxing_commit(vc4, state);
		break;

	default:
		drm_err(dev, "Unknown VC4 generation: %d", vc4->gen);
		break;
	}

	drm_atomic_helper_commit_planes(dev, state,
					DRM_PLANE_COMMIT_ACTIVE_ONLY);

	drm_atomic_helper_commit_modeset_enables(dev, state);

	drm_atomic_helper_fake_vblank(state);

	drm_atomic_helper_commit_hw_done(state);

	drm_atomic_helper_wait_for_flip_done(dev, state);

	drm_atomic_helper_cleanup_planes(dev, state);

	if (vc4->gen == VC4_GEN_5) {
		unsigned long core_rate = min_t(unsigned long,
						hvs->max_core_rate,
						new_hvs_state->core_clock_rate);

		drm_dbg(dev, "Running the core clock at %lu Hz\n", core_rate);

		/*
		 * Request a clock rate based on the current HVS
		 * requirements.
		 */
		WARN_ON(clk_set_min_rate(hvs->core_clk, core_rate));
		WARN_ON(clk_set_min_rate(hvs->disp_clk, core_rate));

		drm_dbg(dev, "Core clock actual rate: %lu Hz\n",
			clk_get_rate(hvs->core_clk));
	}
}

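/*
 * Called from drm_atomic_helper_setup_commit() with the state locks still
 * held: take a reference on the CRTC commit for every FIFO in use so that
 * vc4_atomic_commit_tail() can wait for the previous update on the same
 * channel to land before reprogramming it.
 */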
static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct vc4_hvs_state *hvs_state;
	struct drm_crtc *crtc;
	unsigned int i;

	hvs_state = vc4_hvs_get_new_global_state(state);
	if (WARN_ON(IS_ERR(hvs_state)))
		return PTR_ERR(hvs_state);

	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		struct vc4_crtc_state *vc4_crtc_state =
			to_vc4_crtc_state(crtc_state);
		unsigned int channel =
			vc4_crtc_state->assigned_channel;

		if (channel == VC4_HVS_CHANNEL_DISABLED)
			continue;

		if (!hvs_state->fifo_state[channel].in_use)
			continue;

		hvs_state->fifo_state[channel].pending_commit =
			drm_crtc_commit_get(crtc_state->commit);
	}

	return 0;
}

static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
					     struct drm_file *file_priv,
					     const struct drm_format_info *info,
					     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_mode_fb_cmd2 mode_cmd_local;

	if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
		return ERR_PTR(-ENODEV);

	/* If the user didn't specify a modifier, use the
	 * vc4_set_tiling_ioctl() state for the BO.
	 */
	if (!(mode_cmd->flags & DRM_MODE_FB_MODIFIERS)) {
		struct drm_gem_object *gem_obj;
		struct vc4_bo *bo;

		gem_obj = drm_gem_object_lookup(file_priv,
						mode_cmd->handles[0]);
		if (!gem_obj) {
			DRM_DEBUG("Failed to look up GEM BO %d\n",
				  mode_cmd->handles[0]);
			return ERR_PTR(-ENOENT);
		}
		bo = to_vc4_bo(gem_obj);

		mode_cmd_local = *mode_cmd;

		if (bo->t_format) {
			mode_cmd_local.modifier[0] =
				DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED;
		} else {
			mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
		}

		drm_gem_object_put(gem_obj);

		mode_cmd = &mode_cmd_local;
	}

	return drm_gem_fb_create(dev, file_priv, info, mode_cmd);
}

/* Our CTM has some peculiar limitations: we can only enable it for one CRTC
 * at a time and the HW only supports S0.9 scalars. To account for the latter,
 * we don't allow userland to set a CTM that we have no hope of approximating.
 */
static int
vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_ctm_state *ctm_state = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_color_ctm *ctm;
	int i;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		/* CTM is being disabled. */
		if (!new_crtc_state->ctm && old_crtc_state->ctm) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
			ctm_state->fifo = 0;
		}
	}

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		if (new_crtc_state->ctm == old_crtc_state->ctm)
			continue;

		if (!ctm_state) {
			ctm_state = vc4_get_ctm_state(state, &vc4->ctm_manager);
			if (IS_ERR(ctm_state))
				return PTR_ERR(ctm_state);
		}

		/* CTM is being enabled or the matrix changed. */
		if (new_crtc_state->ctm) {
			struct vc4_crtc_state *vc4_crtc_state =
				to_vc4_crtc_state(new_crtc_state);

			/* fifo is 1-based since 0 disables CTM. */
			int fifo = vc4_crtc_state->assigned_channel + 1;

			/* Check userland isn't trying to turn on CTM for more
			 * than one CRTC at a time.
			 */
			if (ctm_state->fifo && ctm_state->fifo != fifo) {
				DRM_DEBUG_DRIVER("Too many CTMs configured\n");
				return -EINVAL;
			}

			/* Check we can approximate the specified CTM.
			 * We disallow scalars |c| > 1.0 since the HW has
			 * no integer bits.
			 */
			ctm = new_crtc_state->ctm->data;
			for (i = 0; i < ARRAY_SIZE(ctm->matrix); i++) {
				u64 val = ctm->matrix[i];

				val &= ~BIT_ULL(63);
				if (val > BIT_ULL(32))
					return -EINVAL;
			}

			ctm_state->fifo = fifo;
			ctm_state->ctm = ctm;
		}
	}

	return 0;
}

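/*
 * Walks the planes in the state and updates the global memory bus and HVS
 * load estimates by removing the contribution of each old plane state and
 * adding the one of each new plane state. The resulting totals are only
 * enforced when the load tracker is enabled, but they are always computed
 * since the core clock check below relies on them.
 */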
static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct vc4_load_tracker_state *load_state;
	struct drm_private_state *priv_state;
	struct drm_plane *plane;
	int i;

	priv_state = drm_atomic_get_private_obj_state(state,
						      &vc4->load_tracker);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	load_state = to_vc4_load_tracker_state(priv_state);
	for_each_oldnew_plane_in_state(state, plane, old_plane_state,
				       new_plane_state, i) {
		struct vc4_plane_state *vc4_plane_state;

		if (old_plane_state->fb && old_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(old_plane_state);
			load_state->membus_load -= vc4_plane_state->membus_load;
			load_state->hvs_load -= vc4_plane_state->hvs_load;
		}

		if (new_plane_state->fb && new_plane_state->crtc) {
			vc4_plane_state = to_vc4_plane_state(new_plane_state);
			load_state->membus_load += vc4_plane_state->membus_load;
			load_state->hvs_load += vc4_plane_state->hvs_load;
		}
	}

	/* Don't check the load when the tracker is disabled. */
	if (!vc4->load_tracker_enabled)
		return 0;

	/* The absolute memory bandwidth limit is 2 Gbytes/sec, but leave a
	 * margin (SZ_1G + SZ_512M, i.e. 1.5 Gbytes/sec) so the system keeps
	 * working while other blocks are accessing the memory.
	 */
	if (load_state->membus_load > SZ_1G + SZ_512M)
		return -ENOSPC;

	/* The HVS clock is supposed to run at 250 MHz; take a margin and
	 * consider that the maximum number of available cycles is 240M.
	 */
	if (load_state->hvs_load > 240000000ULL)
		return -ENOSPC;

	return 0;
}

static struct drm_private_state *
vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_load_tracker_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	return &state->base;
}

static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_load_tracker_state *load_state;

	load_state = to_vc4_load_tracker_state(state);
	kfree(load_state);
}

static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
	.atomic_duplicate_state = vc4_load_tracker_duplicate_state,
	.atomic_destroy_state = vc4_load_tracker_destroy_state,
};

static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->load_tracker);
}

static int vc4_load_tracker_obj_init(struct vc4_dev *vc4)
{
	struct vc4_load_tracker_state *load_state;

	load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
	if (!load_state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->load_tracker,
				    &load_state->base,
				    &vc4_load_tracker_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL);
}

static struct drm_private_state *
vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
{
	struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state);
	struct vc4_hvs_state *state;
	unsigned int i;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);

	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
		state->fifo_state[i].fifo_load = old_state->fifo_state[i].fifo_load;
	}

	state->core_clock_rate = old_state->core_clock_rate;

	return &state->base;
}

static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj,
					   struct drm_private_state *state)
{
	struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
	unsigned int i;

	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		if (!hvs_state->fifo_state[i].pending_commit)
			continue;

		drm_crtc_commit_put(hvs_state->fifo_state[i].pending_commit);
	}

	kfree(hvs_state);
}

static void vc4_hvs_channels_print_state(struct drm_printer *p,
					 const struct drm_private_state *state)
{
	const struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state);
	unsigned int i;

	drm_printf(p, "HVS State\n");
	drm_printf(p, "\tCore Clock Rate: %lu\n", hvs_state->core_clock_rate);

	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		drm_printf(p, "\tChannel %d\n", i);
		drm_printf(p, "\t\tin use=%d\n", hvs_state->fifo_state[i].in_use);
		drm_printf(p, "\t\tload=%lu\n", hvs_state->fifo_state[i].fifo_load);
	}
}

static const struct drm_private_state_funcs vc4_hvs_state_funcs = {
	.atomic_duplicate_state = vc4_hvs_channels_duplicate_state,
	.atomic_destroy_state = vc4_hvs_channels_destroy_state,
	.atomic_print_state = vc4_hvs_channels_print_state,
};

static void vc4_hvs_channels_obj_fini(struct drm_device *dev, void *unused)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	drm_atomic_private_obj_fini(&vc4->hvs_channels);
}

static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4)
{
	struct vc4_hvs_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels,
				    &state->base,
				    &vc4_hvs_state_funcs);

	return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL);
}

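/* Sort helper: orders CRTCs by their HVS output index, used below to
 * assign FIFOs in a deterministic order.
 */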
static int cmp_vc4_crtc_hvs_output(const void *a, const void *b)
{
	const struct vc4_crtc *crtc_a =
		to_vc4_crtc(*(const struct drm_crtc **)a);
	const struct vc4_crtc_data *data_a =
		vc4_crtc_to_vc4_crtc_data(crtc_a);
	const struct vc4_crtc *crtc_b =
		to_vc4_crtc(*(const struct drm_crtc **)b);
	const struct vc4_crtc_data *data_b =
		vc4_crtc_to_vc4_crtc_data(crtc_b);

	return data_a->hvs_output - data_b->hvs_output;
}

/*
 * The BCM2711 HVS has up to 7 outputs connected to the pixelvalves and
 * the TXP (and therefore all the CRTCs found on that platform).
 *
 * The naive (and our initial) implementation would just iterate over
 * all the active CRTCs, try to find a suitable FIFO, and then remove it
 * from the pool of available FIFOs. However, there are a few corner
 * cases that need to be considered:
 *
 * - When running in a dual-display setup (so with two CRTCs involved),
 *   we can update the state of a single CRTC (for example by changing
 *   its mode using xrandr under X11) without affecting the other. In
 *   this case, the other CRTC wouldn't be in the state at all, so we
 *   need to consider all the running CRTCs in the DRM device to assign
 *   a FIFO, not just the one in the state.
 *
 * - To fix the above, we can't use drm_atomic_get_crtc_state on all
 *   enabled CRTCs to pull their CRTC state into the global state, since
 *   a page flip would then start waiting for their vblank to complete.
 *   Since we don't have a guarantee that they are actually active, that
 *   vblank might never happen, and shouldn't even be considered if we
 *   want to do a page flip on a single CRTC. That can be tested by
 *   doing a modetest -v first on HDMI1 and then on HDMI0.
 *
 * - Since we need the pixelvalve to be disabled and enabled back when
 *   the FIFO is changed, we should keep the FIFO assigned for as long
 *   as the CRTC is enabled, only considering it free again once that
 *   CRTC has been disabled. This can be tested by booting X11 on a
 *   single display, and changing the resolution down and then back up.
 */
static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
				      struct drm_atomic_state *state)
{
	struct vc4_hvs_state *hvs_new_state;
	struct drm_crtc **sorted_crtcs;
	struct drm_crtc *crtc;
	unsigned int unassigned_channels = 0;
	unsigned int i;
	int ret;

	hvs_new_state = vc4_hvs_get_global_state(state);
	if (IS_ERR(hvs_new_state))
		return PTR_ERR(hvs_new_state);

	for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
		if (!hvs_new_state->fifo_state[i].in_use)
			unassigned_channels |= BIT(i);

	/*
	 * The problem we have to solve here is that we have up to 7
	 * encoders, connected to up to 6 CRTCs.
	 *
	 * Those CRTCs, depending on the instance, can be routed to 1, 2
	 * or 3 HVS FIFOs, and we need to set the muxing between FIFOs and
	 * outputs in the HVS accordingly.
	 *
	 * It would be pretty hard to come up with an algorithm that
	 * would generically solve this. However, the current routing
	 * trees we support allow us to simplify a bit the problem.
	 *
	 * Indeed, with the current supported layouts, if we try to
	 * assign in the ascending crtc index order the FIFOs, we can't
	 * fall into the situation where an earlier CRTC that had
	 * multiple routes is assigned one that was the only option for
	 * a later CRTC.
	 *
	 * If the layout changes and doesn't give us that in the future,
	 * we will need to have something smarter, but it works so far.
	 */
	sorted_crtcs = kmalloc_array(dev->num_crtcs, sizeof(*sorted_crtcs), GFP_KERNEL);
	if (!sorted_crtcs)
		return -ENOMEM;

	i = 0;
	drm_for_each_crtc(crtc, dev)
		sorted_crtcs[i++] = crtc;

	sort(sorted_crtcs, i, sizeof(*sorted_crtcs), cmp_vc4_crtc_hvs_output, NULL);

	for (i = 0; i < dev->num_crtcs; i++) {
		struct vc4_crtc_state *old_vc4_crtc_state, *new_vc4_crtc_state;
		struct drm_crtc_state *old_crtc_state, *new_crtc_state;
		struct vc4_crtc *vc4_crtc;
		unsigned int matching_channels;
		unsigned int channel;

		crtc = sorted_crtcs[i];
		if (!crtc)
			continue;
		vc4_crtc = to_vc4_crtc(crtc);

		old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
		if (!old_crtc_state)
			continue;
		old_vc4_crtc_state = to_vc4_crtc_state(old_crtc_state);

		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
		if (!new_crtc_state)
			continue;
		new_vc4_crtc_state = to_vc4_crtc_state(new_crtc_state);

		drm_dbg(dev, "%s: Trying to find a channel.\n", crtc->name);

		/* Nothing to do here, let's skip it */
		if (old_crtc_state->enable == new_crtc_state->enable) {
			if (new_crtc_state->enable)
				drm_dbg(dev, "%s: Already enabled, reusing channel %d.\n",
					crtc->name, new_vc4_crtc_state->assigned_channel);
			else
				drm_dbg(dev, "%s: Disabled, ignoring.\n", crtc->name);

			continue;
		}

		/* Muxing will need to be modified, mark it as such */
		new_vc4_crtc_state->update_muxing = true;

		/* If we're disabling our CRTC, we put back our channel */
		if (!new_crtc_state->enable) {
			channel = old_vc4_crtc_state->assigned_channel;

			drm_dbg(dev, "%s: Disabling, Freeing channel %d\n",
				crtc->name, channel);

			hvs_new_state->fifo_state[channel].in_use = false;
			new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
			continue;
		}

		matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels;
		if (!matching_channels) {
			ret = -EINVAL;
			goto err_free_crtc_array;
		}

		channel = ffs(matching_channels) - 1;

		drm_dbg(dev, "Assigned HVS channel %d to CRTC %s\n", channel, crtc->name);
		new_vc4_crtc_state->assigned_channel = channel;
		unassigned_channels &= ~BIT(channel);
		hvs_new_state->fifo_state[channel].in_use = true;
	}

	kfree(sorted_crtcs);
	return 0;

err_free_crtc_array:
	kfree(sorted_crtcs);
	return ret;
}

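/*
 * Derives the core clock rate the new state needs: the highest per-FIFO
 * (COB) load across the channels in use, compared against the aggregate
 * HVS pixel load scaled down to 40% (several outputs) or 60% (single
 * output) of the raw estimate, whichever is larger.
 */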
static int
vc4_core_clock_atomic_check(struct drm_atomic_state *state)
{
	struct vc4_dev *vc4 = to_vc4_dev(state->dev);
	struct drm_private_state *priv_state;
	struct vc4_hvs_state *hvs_new_state;
	struct vc4_load_tracker_state *load_state;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_crtc *crtc;
	unsigned int num_outputs;
	unsigned long pixel_rate;
	unsigned long cob_rate;
	unsigned int i;

	priv_state = drm_atomic_get_private_obj_state(state,
						      &vc4->load_tracker);
	if (IS_ERR(priv_state))
		return PTR_ERR(priv_state);

	load_state = to_vc4_load_tracker_state(priv_state);

	hvs_new_state = vc4_hvs_get_global_state(state);
	if (IS_ERR(hvs_new_state))
		return PTR_ERR(hvs_new_state);

	for_each_oldnew_crtc_in_state(state, crtc,
				      old_crtc_state,
				      new_crtc_state,
				      i) {
		if (old_crtc_state->active) {
			struct vc4_crtc_state *old_vc4_state =
				to_vc4_crtc_state(old_crtc_state);
			unsigned int channel = old_vc4_state->assigned_channel;

			hvs_new_state->fifo_state[channel].fifo_load = 0;
		}

		if (new_crtc_state->active) {
			struct vc4_crtc_state *new_vc4_state =
				to_vc4_crtc_state(new_crtc_state);
			unsigned int channel = new_vc4_state->assigned_channel;

			hvs_new_state->fifo_state[channel].fifo_load =
				new_vc4_state->hvs_load;
		}
	}

	cob_rate = 0;
	num_outputs = 0;
	for (i = 0; i < HVS_NUM_CHANNELS; i++) {
		if (!hvs_new_state->fifo_state[i].in_use)
			continue;

		num_outputs++;
		cob_rate = max_t(unsigned long,
				 hvs_new_state->fifo_state[i].fifo_load,
				 cob_rate);
	}

	pixel_rate = load_state->hvs_load;
	if (num_outputs > 1) {
		pixel_rate = (pixel_rate * 40) / 100;
	} else {
		pixel_rate = (pixel_rate * 60) / 100;
	}

	hvs_new_state->core_clock_rate = max(cob_rate, pixel_rate);

	return 0;
}

static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
	int ret;

	ret = vc4_pv_muxing_atomic_check(dev, state);
	if (ret)
		return ret;

	ret = vc4_ctm_atomic_check(dev, state);
	if (ret < 0)
		return ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	ret = vc4_load_tracker_atomic_check(state);
	if (ret)
		return ret;

	return vc4_core_clock_atomic_check(state);
}

static struct drm_mode_config_helper_funcs vc4_mode_config_helpers = {
	.atomic_commit_setup	= vc4_atomic_commit_setup,
	.atomic_commit_tail	= vc4_atomic_commit_tail,
};

static const struct drm_mode_config_funcs vc4_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
	.fb_create = vc4_fb_create,
};

static const struct drm_mode_config_funcs vc5_mode_funcs = {
	.atomic_check = vc4_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
	.fb_create = drm_gem_fb_create,
};

int vc4_kms_load(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int ret;

	/*
	 * The limits enforced by the load tracker aren't relevant for
	 * the BCM2711, but the load tracker computations are used for
	 * the core clock rate calculation.
	 */
	if (vc4->gen == VC4_GEN_4) {
		/* Start with the load tracker enabled. Can be
		 * disabled through the debugfs load_tracker file.
		 */
		vc4->load_tracker_enabled = true;
	}

	/* Set support for vblank irq fast disable, before drm_vblank_init() */
	dev->vblank_disable_immediate = true;

	ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
	if (ret < 0) {
		dev_err(dev->dev, "failed to initialize vblank\n");
		return ret;
	}

	if (vc4->gen >= VC4_GEN_6_C) {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	} else if (vc4->gen >= VC4_GEN_5) {
		dev->mode_config.max_width = 7680;
		dev->mode_config.max_height = 7680;
	} else {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	}

	dev->mode_config.funcs = (vc4->gen > VC4_GEN_4) ? &vc5_mode_funcs : &vc4_mode_funcs;
	dev->mode_config.helper_private = &vc4_mode_config_helpers;
	dev->mode_config.preferred_depth = 24;
	dev->mode_config.async_page_flip = true;
	dev->mode_config.normalize_zpos = true;

	ret = vc4_ctm_obj_init(vc4);
	if (ret)
		return ret;

	ret = vc4_load_tracker_obj_init(vc4);
	if (ret)
		return ret;

	ret = vc4_hvs_channels_obj_init(vc4);
	if (ret)
		return ret;

	drm_mode_config_reset(dev);

	drm_kms_helper_poll_init(dev);

	return 0;
}