xref: /linux/drivers/gpu/drm/i915/display/intel_bw.c (revision 6dfafbd0299a60bfb5d5e277fdf100037c7ded07)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_print.h>

#include "soc/intel_dram.h"

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_bw.h"
#include "intel_crtc.h"
#include "intel_display_core.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "intel_uncore.h"
#include "skl_watermark.h"

struct intel_bw_state {
	struct intel_global_state base;

	/*
	 * Contains a bit mask used to determine whether the
	 * corresponding pipe allows SAGV or not.
	 */
	u8 pipe_sagv_reject;

	/* bitmask of active pipes */
	u8 active_pipes;

	/*
	 * From MTL onwards, to lock a QGV point, punit expects the peak BW of
	 * the selected QGV point as the parameter in multiples of 100MB/s
	 */
	u16 qgv_point_peakbw;

	/*
	 * Current QGV points mask, which restricts
	 * some particular SAGV states; not to be confused
	 * with pipe_sagv_reject.
	 */
	u16 qgv_points_mask;

	unsigned int data_rate[I915_MAX_PIPES];
	u8 num_active_planes[I915_MAX_PIPES];
};

/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
	u16 dclk, t_rp, t_rdpre, t_rc, t_ras, t_rcd;
};

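/*
 * Used as "peakbw * DEPROGBWPCLIMIT / 100" below, i.e. presumably the
 * display engine may consume at most 60% of the raw peak memory bandwidth.
 */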
#define DEPROGBWPCLIMIT		60

struct intel_psf_gv_point {
	u8 clk; /* clock in multiples of 16.6666 MHz */
};

struct intel_qgv_info {
	struct intel_qgv_point points[I915_NUM_QGV_POINTS];
	struct intel_psf_gv_point psf_points[I915_NUM_PSF_GV_POINTS];
	u8 num_points;
	u8 num_psf_points;
	u8 t_bl;
	u8 max_numchannels;
	u8 channel_width;
	u8 deinterleave;
};

static int dg1_mchbar_read_qgv_point_info(struct intel_display *display,
					  struct intel_qgv_point *sp,
					  int point)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	u32 dclk_ratio, dclk_reference;
	u32 val;

	val = intel_uncore_read(&i915->uncore, SA_PERF_STATUS_0_0_0_MCHBAR_PC);
	dclk_ratio = REG_FIELD_GET(DG1_QCLK_RATIO_MASK, val);
	if (val & DG1_QCLK_REFERENCE)
		dclk_reference = 6; /* 6 * 16.666 MHz = 100 MHz */
	else
		dclk_reference = 8; /* 8 * 16.666 MHz = 133 MHz */
	sp->dclk = DIV_ROUND_UP((16667 * dclk_ratio * dclk_reference) + 500, 1000);
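	/*
	 * Illustrative example with assumed register values (not from any
	 * real platform): dclk_ratio = 12 and a 133 MHz reference give
	 * 16667 * 12 * 8 = 1600032; adding 500 and rounding up by 1000
	 * yields sp->dclk = 1601, i.e. roughly 1.6 GHz.
	 */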

	val = intel_uncore_read(&i915->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
	if (val & DG1_GEAR_TYPE)
		sp->dclk *= 2;

	if (sp->dclk == 0)
		return -EINVAL;

	val = intel_uncore_read(&i915->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR);
	sp->t_rp = REG_FIELD_GET(DG1_DRAM_T_RP_MASK, val);
	sp->t_rdpre = REG_FIELD_GET(DG1_DRAM_T_RDPRE_MASK, val);

	val = intel_uncore_read(&i915->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH);
	sp->t_rcd = REG_FIELD_GET(DG1_DRAM_T_RCD_MASK, val);
	sp->t_ras = REG_FIELD_GET(DG1_DRAM_T_RAS_MASK, val);

	sp->t_rc = sp->t_rp + sp->t_ras;

	return 0;
}

static int icl_pcode_read_qgv_point_info(struct intel_display *display,
					 struct intel_qgv_point *sp,
					 int point)
{
	u32 val = 0, val2 = 0;
	u16 dclk;
	int ret;

	ret = intel_pcode_read(display->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
			       ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
			       &val, &val2);
	if (ret)
		return ret;

	dclk = val & 0xffff;
	sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(display) >= 12 ? 500 : 0),
				1000);
	sp->t_rp = (val & 0xff0000) >> 16;
	sp->t_rcd = (val & 0xff000000) >> 24;

	sp->t_rdpre = val2 & 0xff;
	sp->t_ras = (val2 & 0xff00) >> 8;

	sp->t_rc = sp->t_rp + sp->t_ras;

	return 0;
}

static int adls_pcode_read_psf_gv_point_info(struct intel_display *display,
					     struct intel_psf_gv_point *points)
{
	u32 val = 0;
	int ret;
	int i;

	ret = intel_pcode_read(display->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
			       ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
	if (ret)
		return ret;

	for (i = 0; i < I915_NUM_PSF_GV_POINTS; i++) {
		points[i].clk = val & 0xff;
		val >>= 8;
	}

	return 0;
}

static u16 icl_qgv_points_mask(struct intel_display *display)
{
	unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
	unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
	u16 qgv_points = 0, psf_points = 0;

	/*
	 * We can _not_ use the whole ADLS_QGV_PT_MASK here, as PCode rejects
	 * it with a failure if we try masking any unadvertised points.
	 * So we need to operate only on those returned from PCode.
	 */
	if (num_qgv_points > 0)
		qgv_points = GENMASK(num_qgv_points - 1, 0);

	if (num_psf_gv_points > 0)
		psf_points = GENMASK(num_psf_gv_points - 1, 0);
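	/*
	 * E.g. with an assumed 3 QGV and 2 PSF points advertised by pcode:
	 * qgv_points = 0b111 and psf_points = 0b11.
	 */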

	return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points);
}

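/*
 * SAGV counts as enabled unless exactly one advertised QGV point remains
 * unmasked: a single remaining point pins the memory frequency, which is
 * how SAGV is effectively disabled.
 */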
static bool is_sagv_enabled(struct intel_display *display, u16 points_mask)
{
	return !is_power_of_2(~points_mask & icl_qgv_points_mask(display) &
			      ICL_PCODE_REQ_QGV_PT_MASK);
}

static int icl_pcode_restrict_qgv_points(struct intel_display *display,
					 u32 points_mask)
{
	int ret;

	if (DISPLAY_VER(display) >= 14)
		return 0;

	/* bspec says to keep retrying for at least 1 ms */
	ret = intel_pcode_request(display->drm, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
				  points_mask,
				  ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
				  ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
				  1);

	if (ret < 0) {
		drm_err(display->drm,
			"Failed to disable qgv points (0x%x) points: 0x%x\n",
			ret, points_mask);
		return ret;
	}

	display->sagv.status = is_sagv_enabled(display, points_mask) ?
		I915_SAGV_ENABLED : I915_SAGV_DISABLED;

	return 0;
}

static int mtl_read_qgv_point_info(struct intel_display *display,
				   struct intel_qgv_point *sp, int point)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	u32 val, val2;
	u16 dclk;

	val = intel_uncore_read(&i915->uncore,
				MTL_MEM_SS_INFO_QGV_POINT_LOW(point));
	val2 = intel_uncore_read(&i915->uncore,
				 MTL_MEM_SS_INFO_QGV_POINT_HIGH(point));
	dclk = REG_FIELD_GET(MTL_DCLK_MASK, val);
	sp->dclk = DIV_ROUND_CLOSEST(16667 * dclk, 1000);
	sp->t_rp = REG_FIELD_GET(MTL_TRP_MASK, val);
	sp->t_rcd = REG_FIELD_GET(MTL_TRCD_MASK, val);

	sp->t_rdpre = REG_FIELD_GET(MTL_TRDPRE_MASK, val2);
	sp->t_ras = REG_FIELD_GET(MTL_TRAS_MASK, val2);

	sp->t_rc = sp->t_rp + sp->t_ras;

	return 0;
}

static int
intel_read_qgv_point_info(struct intel_display *display,
			  struct intel_qgv_point *sp,
			  int point)
{
	if (DISPLAY_VER(display) >= 14)
		return mtl_read_qgv_point_info(display, sp, point);
	else if (display->platform.dg1)
		return dg1_mchbar_read_qgv_point_info(display, sp, point);
	else
		return icl_pcode_read_qgv_point_info(display, sp, point);
}

static int icl_get_qgv_points(struct intel_display *display,
			      const struct dram_info *dram_info,
			      struct intel_qgv_info *qi,
			      bool is_y_tile)
{
	int i, ret;

	qi->num_points = dram_info->num_qgv_points;
	qi->num_psf_points = dram_info->num_psf_gv_points;

	if (DISPLAY_VER(display) >= 14) {
		switch (dram_info->type) {
		case INTEL_DRAM_DDR4:
			qi->t_bl = 4;
			qi->max_numchannels = 2;
			qi->channel_width = 64;
			qi->deinterleave = 2;
			break;
		case INTEL_DRAM_DDR5:
			qi->t_bl = 8;
			qi->max_numchannels = 4;
			qi->channel_width = 32;
			qi->deinterleave = 2;
			break;
		case INTEL_DRAM_LPDDR4:
		case INTEL_DRAM_LPDDR5:
			qi->t_bl = 16;
			qi->max_numchannels = 8;
			qi->channel_width = 16;
			qi->deinterleave = 4;
			break;
		case INTEL_DRAM_GDDR:
		case INTEL_DRAM_GDDR_ECC:
			qi->channel_width = 32;
			break;
		default:
			MISSING_CASE(dram_info->type);
			return -EINVAL;
		}
	} else if (DISPLAY_VER(display) >= 12) {
		switch (dram_info->type) {
		case INTEL_DRAM_DDR4:
			qi->t_bl = is_y_tile ? 8 : 4;
			qi->max_numchannels = 2;
			qi->channel_width = 64;
			qi->deinterleave = is_y_tile ? 1 : 2;
			break;
		case INTEL_DRAM_DDR5:
			qi->t_bl = is_y_tile ? 16 : 8;
			qi->max_numchannels = 4;
			qi->channel_width = 32;
			qi->deinterleave = is_y_tile ? 1 : 2;
			break;
		case INTEL_DRAM_LPDDR4:
			if (display->platform.rocketlake) {
				qi->t_bl = 8;
				qi->max_numchannels = 4;
				qi->channel_width = 32;
				qi->deinterleave = 2;
				break;
			}
			fallthrough;
		case INTEL_DRAM_LPDDR5:
			qi->t_bl = 16;
			qi->max_numchannels = 8;
			qi->channel_width = 16;
			qi->deinterleave = is_y_tile ? 2 : 4;
			break;
		default:
			qi->t_bl = 16;
			qi->max_numchannels = 1;
			break;
		}
	} else if (DISPLAY_VER(display) == 11) {
		qi->t_bl = dram_info->type == INTEL_DRAM_DDR4 ? 4 : 8;
		qi->max_numchannels = 1;
	}

	if (drm_WARN_ON(display->drm,
			qi->num_points > ARRAY_SIZE(qi->points)))
		qi->num_points = ARRAY_SIZE(qi->points);

	for (i = 0; i < qi->num_points; i++) {
		struct intel_qgv_point *sp = &qi->points[i];

		ret = intel_read_qgv_point_info(display, sp, i);
		if (ret) {
			drm_dbg_kms(display->drm, "Could not read QGV %d info\n", i);
			return ret;
		}

		drm_dbg_kms(display->drm,
			    "QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
			    i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
			    sp->t_rcd, sp->t_rc);
	}

	if (qi->num_psf_points > 0) {
		ret = adls_pcode_read_psf_gv_point_info(display, qi->psf_points);
		if (ret) {
			drm_err(display->drm, "Failed to read PSF point data; PSF points will not be considered in bandwidth calculations.\n");
			qi->num_psf_points = 0;
		}

		for (i = 0; i < qi->num_psf_points; i++)
			drm_dbg_kms(display->drm,
				    "PSF GV %d: CLK=%d\n",
				    i, qi->psf_points[i].clk);
	}

	return 0;
}

static int adl_calc_psf_bw(int clk)
{
	/*
	 * clk is multiples of 16.666MHz (100/6)
	 * According to BSpec PSF GV bandwidth is
	 * calculated as BW = 64 * clk * 16.666Mhz
	 */
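	/*
	 * Worked example with an assumed clk of 38 (~633 MHz):
	 * 64 * 38 * 100 / 6 = 40533 MB/s, i.e. roughly 40.5 GB/s.
	 */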
	return DIV_ROUND_CLOSEST(64 * clk * 100, 6);
}

static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
{
	u16 dclk = 0;
	int i;

	for (i = 0; i < qi->num_points; i++)
		dclk = max(dclk, qi->points[i].dclk);

	return dclk;
}

struct intel_sa_info {
	u16 displayrtids;
	u8 deburst, deprogbwlimit, derating;
};

static const struct intel_sa_info icl_sa_info = {
	.deburst = 8,
	.deprogbwlimit = 25, /* GB/s */
	.displayrtids = 128,
	.derating = 10,
};

static const struct intel_sa_info tgl_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 34, /* GB/s */
	.displayrtids = 256,
	.derating = 10,
};

static const struct intel_sa_info rkl_sa_info = {
	.deburst = 8,
	.deprogbwlimit = 20, /* GB/s */
	.displayrtids = 128,
	.derating = 10,
};

static const struct intel_sa_info adls_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 38, /* GB/s */
	.displayrtids = 256,
	.derating = 10,
};

static const struct intel_sa_info adlp_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 38, /* GB/s */
	.displayrtids = 256,
	.derating = 20,
};

static const struct intel_sa_info mtl_sa_info = {
	.deburst = 32,
	.deprogbwlimit = 38, /* GB/s */
	.displayrtids = 256,
	.derating = 10,
};

static const struct intel_sa_info xe2_hpd_sa_info = {
	.derating = 30,
	.deprogbwlimit = 53,
	/* Other values not used by simplified algorithm */
};

static const struct intel_sa_info xe2_hpd_ecc_sa_info = {
	.derating = 45,
	.deprogbwlimit = 53,
	/* Other values not used by simplified algorithm */
};

static const struct intel_sa_info xe3lpd_sa_info = {
	.deburst = 32,
	.deprogbwlimit = 65, /* GB/s */
	.displayrtids = 256,
	.derating = 10,
};

static const struct intel_sa_info xe3lpd_3002_sa_info = {
	.deburst = 32,
	.deprogbwlimit = 22, /* GB/s */
	.displayrtids = 256,
	.derating = 10,
};

static int icl_get_bw_info(struct intel_display *display,
			   const struct dram_info *dram_info,
			   const struct intel_sa_info *sa)
{
	struct intel_qgv_info qi = {};
	bool is_y_tile = true; /* assume y tile may be used */
	int num_channels = max_t(u8, 1, dram_info->num_channels);
	int ipqdepth, ipqdepthpch = 16;
	int dclk_max;
	int maxdebw;
	int num_groups = ARRAY_SIZE(display->bw.max);
	int i, ret;

	ret = icl_get_qgv_points(display, dram_info, &qi, is_y_tile);
	if (ret) {
		drm_dbg_kms(display->drm,
			    "Failed to get memory subsystem information, ignoring bandwidth limits");
		return ret;
	}

	dclk_max = icl_sagv_max_dclk(&qi);
	maxdebw = min(sa->deprogbwlimit * 1000, dclk_max * 16 * 6 / 10);
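	/* presumably 16 bytes per dclk, derated to 60% (cf. DEPROGBWPCLIMIT) */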
	ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);
	qi.deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);

	for (i = 0; i < num_groups; i++) {
		struct intel_bw_info *bi = &display->bw.max[i];
		int clpchgroup;
		int j;

		clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;
		bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;

		bi->num_qgv_points = qi.num_points;
		bi->num_psf_gv_points = qi.num_psf_points;

		for (j = 0; j < qi.num_points; j++) {
			const struct intel_qgv_point *sp = &qi.points[j];
			int ct, bw;

			/*
			 * Max row cycle time
			 *
			 * FIXME what is the logic behind the
			 * assumed burst length?
			 */
			ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
				   (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
			bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct);

			bi->deratedbw[j] = min(maxdebw,
					       bw * (100 - sa->derating) / 100);

			drm_dbg_kms(display->drm,
				    "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
				    i, j, bi->num_planes, bi->deratedbw[j]);
		}
	}
	/*
	 * If SAGV is disabled in the BIOS, we always get 1 SAGV point,
	 * but we can't send PCode commands to restrict it as they would
	 * fail, and it would be pointless anyway.
	 */
	if (qi.num_points == 1)
		display->sagv.status = I915_SAGV_NOT_CONTROLLED;
	else
		display->sagv.status = I915_SAGV_ENABLED;

	return 0;
}

static int tgl_get_bw_info(struct intel_display *display,
			   const struct dram_info *dram_info,
			   const struct intel_sa_info *sa)
{
	struct intel_qgv_info qi = {};
	bool is_y_tile = true; /* assume y tile may be used */
	int num_channels = max_t(u8, 1, dram_info->num_channels);
	int ipqdepth, ipqdepthpch = 16;
	int dclk_max;
	int maxdebw, peakbw;
	int clperchgroup;
	int num_groups = ARRAY_SIZE(display->bw.max);
	int i, ret;

	ret = icl_get_qgv_points(display, dram_info, &qi, is_y_tile);
	if (ret) {
		drm_dbg_kms(display->drm,
			    "Failed to get memory subsystem information, ignoring bandwidth limits");
		return ret;
	}

	if (DISPLAY_VER(display) < 14 &&
	    (dram_info->type == INTEL_DRAM_LPDDR4 || dram_info->type == INTEL_DRAM_LPDDR5))
		num_channels *= 2;

	qi.deinterleave = qi.deinterleave ? : DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);

	if (num_channels < qi.max_numchannels && DISPLAY_VER(display) >= 12)
		qi.deinterleave = max(DIV_ROUND_UP(qi.deinterleave, 2), 1);

	if (DISPLAY_VER(display) >= 12 && num_channels > qi.max_numchannels)
		drm_warn(display->drm, "Number of channels exceeds max number of channels.");
	if (qi.max_numchannels != 0)
		num_channels = min_t(u8, num_channels, qi.max_numchannels);

	dclk_max = icl_sagv_max_dclk(&qi);

	peakbw = num_channels * DIV_ROUND_UP(qi.channel_width, 8) * dclk_max;
	maxdebw = min(sa->deprogbwlimit * 1000, peakbw * DEPROGBWPCLIMIT / 100);
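	/* i.e. cap the derated BW at DEPROGBWPCLIMIT (60%) of the raw peak */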

	ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);
	/*
	 * clperchgroup = 4kpagespermempage * clperchperblock,
	 * clperchperblock = 8 / num_channels * interleave
	 */
	clperchgroup = 4 * DIV_ROUND_UP(8, num_channels) * qi.deinterleave;
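	/*
	 * E.g. with an assumed num_channels = 4 and deinterleave = 2:
	 * clperchgroup = 4 * DIV_ROUND_UP(8, 4) * 2 = 16.
	 */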

	for (i = 0; i < num_groups; i++) {
		struct intel_bw_info *bi = &display->bw.max[i];
		struct intel_bw_info *bi_next;
		int clpchgroup;
		int j;

		clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;

		if (i < num_groups - 1) {
			bi_next = &display->bw.max[i + 1];

			if (clpchgroup < clperchgroup)
				bi_next->num_planes = (ipqdepth - clpchgroup) /
						       clpchgroup + 1;
			else
				bi_next->num_planes = 0;
		}

		bi->num_qgv_points = qi.num_points;
		bi->num_psf_gv_points = qi.num_psf_points;

		for (j = 0; j < qi.num_points; j++) {
			const struct intel_qgv_point *sp = &qi.points[j];
			int ct, bw;

			/*
			 * Max row cycle time
			 *
			 * FIXME what is the logic behind the
			 * assumed burst length?
			 */
			ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
				   (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
			bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct);

			bi->deratedbw[j] = min(maxdebw,
					       bw * (100 - sa->derating) / 100);
			bi->peakbw[j] = DIV_ROUND_CLOSEST(sp->dclk *
							  num_channels *
							  qi.channel_width, 8);

			drm_dbg_kms(display->drm,
				    "BW%d / QGV %d: num_planes=%d deratedbw=%u peakbw: %u\n",
				    i, j, bi->num_planes, bi->deratedbw[j],
				    bi->peakbw[j]);
		}

		for (j = 0; j < qi.num_psf_points; j++) {
			const struct intel_psf_gv_point *sp = &qi.psf_points[j];

			bi->psf_bw[j] = adl_calc_psf_bw(sp->clk);

			drm_dbg_kms(display->drm,
				    "BW%d / PSF GV %d: num_planes=%d bw=%u\n",
				    i, j, bi->num_planes, bi->psf_bw[j]);
		}
	}

	/*
	 * If SAGV is disabled in the BIOS, we always get 1 SAGV point,
	 * but we can't send PCode commands to restrict it as they would
	 * fail, and it would be pointless anyway.
	 */
	if (qi.num_points == 1)
		display->sagv.status = I915_SAGV_NOT_CONTROLLED;
	else
		display->sagv.status = I915_SAGV_ENABLED;

	return 0;
}

static void dg2_get_bw_info(struct intel_display *display)
{
	unsigned int deratedbw = display->platform.dg2_g11 ? 38000 : 50000;
	int num_groups = ARRAY_SIZE(display->bw.max);
	int i;

	/*
	 * DG2 doesn't have SAGV or QGV points, just a constant max bandwidth
	 * that doesn't depend on the number of planes enabled. So fill all the
	 * plane groups with constant bw information for uniformity with other
	 * platforms. DG2-G10 platforms have a constant 50 GB/s bandwidth,
	 * whereas DG2-G11 platforms have 38 GB/s.
	 */
	for (i = 0; i < num_groups; i++) {
		struct intel_bw_info *bi = &display->bw.max[i];

		bi->num_planes = 1;
		/* Need only one dummy QGV point per group */
		bi->num_qgv_points = 1;
		bi->deratedbw[0] = deratedbw;
	}

	display->sagv.status = I915_SAGV_NOT_CONTROLLED;
}

static int xe2_hpd_get_bw_info(struct intel_display *display,
			       const struct dram_info *dram_info,
			       const struct intel_sa_info *sa)
{
	struct intel_qgv_info qi = {};
	int num_channels = dram_info->num_channels;
	int peakbw, maxdebw;
	int ret, i;

	ret = icl_get_qgv_points(display, dram_info, &qi, true);
	if (ret) {
		drm_dbg_kms(display->drm,
			    "Failed to get memory subsystem information, ignoring bandwidth limits");
		return ret;
	}

	peakbw = num_channels * qi.channel_width / 8 * icl_sagv_max_dclk(&qi);
	maxdebw = min(sa->deprogbwlimit * 1000, peakbw * DEPROGBWPCLIMIT / 100);

	for (i = 0; i < qi.num_points; i++) {
		const struct intel_qgv_point *point = &qi.points[i];
		int bw = num_channels * (qi.channel_width / 8) * point->dclk;

		display->bw.max[0].deratedbw[i] =
			min(maxdebw, (100 - sa->derating) * bw / 100);
		display->bw.max[0].peakbw[i] = bw;

		drm_dbg_kms(display->drm, "QGV %d: deratedbw=%u peakbw: %u\n",
			    i, display->bw.max[0].deratedbw[i],
			    display->bw.max[0].peakbw[i]);
	}

	/* Bandwidth does not depend on # of planes; set all groups the same */
	display->bw.max[0].num_planes = 1;
	display->bw.max[0].num_qgv_points = qi.num_points;
	for (i = 1; i < ARRAY_SIZE(display->bw.max); i++)
		memcpy(&display->bw.max[i], &display->bw.max[0],
		       sizeof(display->bw.max[0]));

	/*
	 * Xe2_HPD should always have exactly two QGV points representing
	 * battery and plugged-in operation.
	 */
	drm_WARN_ON(display->drm, qi.num_points != 2);
	display->sagv.status = I915_SAGV_ENABLED;

	return 0;
}

static unsigned int icl_max_bw_index(struct intel_display *display,
				     int num_planes, int qgv_point)
{
	int i;

	/*
	 * Let's return max bw for 0 planes
	 */
	num_planes = max(1, num_planes);

	for (i = 0; i < ARRAY_SIZE(display->bw.max); i++) {
		const struct intel_bw_info *bi =
			&display->bw.max[i];

		/*
		 * Pcode will not expose all QGV points when
		 * SAGV is forced to off/min/med/max.
		 */
		if (qgv_point >= bi->num_qgv_points)
			return UINT_MAX;

		if (num_planes >= bi->num_planes)
			return i;
	}

	return UINT_MAX;
}

static unsigned int tgl_max_bw_index(struct intel_display *display,
				     int num_planes, int qgv_point)
{
	int i;

	/*
	 * Let's return max bw for 0 planes
	 */
	num_planes = max(1, num_planes);

	for (i = ARRAY_SIZE(display->bw.max) - 1; i >= 0; i--) {
		const struct intel_bw_info *bi =
			&display->bw.max[i];

		/*
		 * Pcode will not expose all QGV points when
		 * SAGV is forced to off/min/med/max.
		 */
		if (qgv_point >= bi->num_qgv_points)
			return UINT_MAX;

		if (num_planes <= bi->num_planes)
			return i;
	}

	return 0;
}

static unsigned int adl_psf_bw(struct intel_display *display,
			       int psf_gv_point)
{
	const struct intel_bw_info *bi =
			&display->bw.max[0];

	return bi->psf_bw[psf_gv_point];
}

static unsigned int icl_qgv_bw(struct intel_display *display,
			       int num_active_planes, int qgv_point)
{
	unsigned int idx;

	if (DISPLAY_VER(display) >= 12)
		idx = tgl_max_bw_index(display, num_active_planes, qgv_point);
	else
		idx = icl_max_bw_index(display, num_active_planes, qgv_point);

	if (idx >= ARRAY_SIZE(display->bw.max))
		return 0;

	return display->bw.max[idx].deratedbw[qgv_point];
}

void intel_bw_init_hw(struct intel_display *display)
{
	const struct dram_info *dram_info = intel_dram_info(display->drm);

	if (!HAS_DISPLAY(display))
		return;

	/*
	 * Starting with Xe3p_LPD, the hardware tells us whether memory has ECC
	 * enabled that would impact display bandwidth.  However, so far there
	 * are no instructions in Bspec on how to handle that case.  Let's
	 * complain if we ever find such a scenario.
	 */
	if (DISPLAY_VER(display) >= 35)
		drm_WARN_ON(display->drm, dram_info->ecc_impacting_de_bw);

	if (DISPLAY_VER(display) >= 30) {
		if (DISPLAY_VERx100(display) == 3002)
			tgl_get_bw_info(display, dram_info, &xe3lpd_3002_sa_info);
		else
			tgl_get_bw_info(display, dram_info, &xe3lpd_sa_info);
	} else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx) {
		if (dram_info->type == INTEL_DRAM_GDDR_ECC)
			xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_ecc_sa_info);
		else
			xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_sa_info);
	} else if (DISPLAY_VER(display) >= 14) {
		tgl_get_bw_info(display, dram_info, &mtl_sa_info);
	} else if (display->platform.dg2) {
		dg2_get_bw_info(display);
	} else if (display->platform.alderlake_p) {
		tgl_get_bw_info(display, dram_info, &adlp_sa_info);
	} else if (display->platform.alderlake_s) {
		tgl_get_bw_info(display, dram_info, &adls_sa_info);
	} else if (display->platform.rocketlake) {
		tgl_get_bw_info(display, dram_info, &rkl_sa_info);
	} else if (DISPLAY_VER(display) == 12) {
		tgl_get_bw_info(display, dram_info, &tgl_sa_info);
	} else if (DISPLAY_VER(display) == 11) {
		icl_get_bw_info(display, dram_info, &icl_sa_info);
	}
}

static unsigned int intel_bw_num_active_planes(struct intel_display *display,
					       const struct intel_bw_state *bw_state)
{
	unsigned int num_active_planes = 0;
	enum pipe pipe;

	for_each_pipe(display, pipe)
		num_active_planes += bw_state->num_active_planes[pipe];

	return num_active_planes;
}

static unsigned int intel_bw_data_rate(struct intel_display *display,
				       const struct intel_bw_state *bw_state)
{
	unsigned int data_rate = 0;
	enum pipe pipe;

	for_each_pipe(display, pipe)
		data_rate += bw_state->data_rate[pipe];

	if (DISPLAY_VER(display) >= 13 && intel_display_vtd_active(display))
		data_rate = DIV_ROUND_UP(data_rate * 105, 100);
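	/* add 5% for VT-d overhead; e.g. an assumed 10000 becomes 10500 */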

	return data_rate;
}

struct intel_bw_state *to_intel_bw_state(struct intel_global_state *obj_state)
{
	return container_of(obj_state, struct intel_bw_state, base);
}

struct intel_bw_state *
intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_global_state *bw_state;

	bw_state = intel_atomic_get_old_global_obj_state(state, &display->bw.obj);

	return to_intel_bw_state(bw_state);
}

struct intel_bw_state *
intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_global_state *bw_state;

	bw_state = intel_atomic_get_new_global_obj_state(state, &display->bw.obj);

	return to_intel_bw_state(bw_state);
}

struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_global_state *bw_state;

	bw_state = intel_atomic_get_global_obj_state(state, &display->bw.obj);
	if (IS_ERR(bw_state))
		return ERR_CAST(bw_state);

	return to_intel_bw_state(bw_state);
}

static unsigned int icl_max_bw_qgv_point_mask(struct intel_display *display,
					      int num_active_planes)
{
	unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
	unsigned int max_bw_point = 0;
	unsigned int max_bw = 0;
	int i;

	for (i = 0; i < num_qgv_points; i++) {
		unsigned int max_data_rate =
			icl_qgv_bw(display, num_active_planes, i);

		/*
		 * We need to know which qgv point gives us
		 * maximum bandwidth in order to disable SAGV
		 * if we find that we exceed SAGV block time
		 * with watermarks. By that moment we already
		 * have those, as they are calculated earlier
		 * in intel_atomic_check.
		 */
		if (max_data_rate > max_bw) {
			max_bw_point = BIT(i);
			max_bw = max_data_rate;
		}
	}

	return max_bw_point;
}

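/*
 * Invert the "allowed points" masks into the "points to restrict" form
 * that pcode expects, limited to the points actually advertised by pcode.
 */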
static u16 icl_prepare_qgv_points_mask(struct intel_display *display,
				       unsigned int qgv_points,
				       unsigned int psf_points)
{
	return ~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
		 ADLS_PCODE_REQ_PSF_PT(psf_points)) & icl_qgv_points_mask(display);
}

static unsigned int icl_max_bw_psf_gv_point_mask(struct intel_display *display)
{
	unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
	unsigned int max_bw_point_mask = 0;
	unsigned int max_bw = 0;
	int i;

	for (i = 0; i < num_psf_gv_points; i++) {
		unsigned int max_data_rate = adl_psf_bw(display, i);

		if (max_data_rate > max_bw) {
			max_bw_point_mask = BIT(i);
			max_bw = max_data_rate;
		} else if (max_data_rate == max_bw) {
			max_bw_point_mask |= BIT(i);
		}
	}

	return max_bw_point_mask;
}

static void icl_force_disable_sagv(struct intel_display *display,
				   struct intel_bw_state *bw_state)
{
	unsigned int qgv_points = icl_max_bw_qgv_point_mask(display, 0);
	unsigned int psf_points = icl_max_bw_psf_gv_point_mask(display);

	bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display,
								qgv_points,
								psf_points);

	drm_dbg_kms(display->drm, "Forcing SAGV disable: mask 0x%x\n",
		    bw_state->qgv_points_mask);

	icl_pcode_restrict_qgv_points(display, bw_state->qgv_points_mask);
}

void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_bw_state *old_bw_state =
		intel_atomic_get_old_bw_state(state);
	const struct intel_bw_state *new_bw_state =
		intel_atomic_get_new_bw_state(state);
	u16 old_mask, new_mask;

	if (!new_bw_state)
		return;

	old_mask = old_bw_state->qgv_points_mask;
	new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;

	if (old_mask == new_mask)
		return;

	WARN_ON(!new_bw_state->base.changed);

	drm_dbg_kms(display->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
		    old_mask, new_mask);

	/*
	 * Restrict required qgv points before updating the configuration.
	 * According to BSpec we can't mask and unmask qgv points at the same
	 * time. Also masking should be done before updating the configuration
	 * and unmasking afterwards.
	 */
	icl_pcode_restrict_qgv_points(display, new_mask);
}

void icl_sagv_post_plane_update(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_bw_state *old_bw_state =
		intel_atomic_get_old_bw_state(state);
	const struct intel_bw_state *new_bw_state =
		intel_atomic_get_new_bw_state(state);
	u16 old_mask, new_mask;

	if (!new_bw_state)
		return;

	old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
	new_mask = new_bw_state->qgv_points_mask;

	if (old_mask == new_mask)
		return;

	WARN_ON(!new_bw_state->base.changed);

	drm_dbg_kms(display->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
		    old_mask, new_mask);

	/*
	 * Allow required qgv points after updating the configuration.
	 * According to BSpec we can't mask and unmask qgv points at the same
	 * time. Also masking should be done before updating the configuration
	 * and unmasking afterwards.
	 */
	icl_pcode_restrict_qgv_points(display, new_mask);
}

static int mtl_find_qgv_points(struct intel_display *display,
			       unsigned int data_rate,
			       unsigned int num_active_planes,
			       struct intel_bw_state *new_bw_state)
{
	unsigned int best_rate = UINT_MAX;
	unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
	unsigned int qgv_peak_bw = 0;
	int i;
	int ret;

	ret = intel_atomic_lock_global_state(&new_bw_state->base);
	if (ret)
		return ret;

	/*
	 * If SAGV cannot be enabled, disable the pcode SAGV by passing all 1's
	 * for the qgv peak bw in the PM Demand request. So assign U16_MAX if
	 * SAGV is not enabled. The PM Demand code will clamp the value for the
	 * register.
	 */
	if (!intel_bw_can_enable_sagv(display, new_bw_state)) {
		new_bw_state->qgv_point_peakbw = U16_MAX;
		drm_dbg_kms(display->drm, "No SAGV, use UINT_MAX as peak bw.");
		return 0;
	}

	/*
	 * Find the best QGV point by comparing the data_rate with max data rate
	 * offered per plane group
	 */
	for (i = 0; i < num_qgv_points; i++) {
		unsigned int bw_index =
			tgl_max_bw_index(display, num_active_planes, i);
		unsigned int max_data_rate;

		if (bw_index >= ARRAY_SIZE(display->bw.max))
			continue;

		max_data_rate = display->bw.max[bw_index].deratedbw[i];

		if (max_data_rate < data_rate)
			continue;

		if (max_data_rate - data_rate < best_rate) {
			best_rate = max_data_rate - data_rate;
			qgv_peak_bw = display->bw.max[bw_index].peakbw[i];
		}

		drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d qgv_peak_bw: %d\n",
			    i, max_data_rate, data_rate, qgv_peak_bw);
	}

	drm_dbg_kms(display->drm, "Matching peaks QGV bw: %d for required data rate: %d\n",
		    qgv_peak_bw, data_rate);

	/*
	 * The display configuration cannot be supported if no QGV point
	 * satisfying the required data rate is found
	 */
	if (qgv_peak_bw == 0) {
		drm_dbg_kms(display->drm, "No QGV points for bw %d for display configuration(%d active planes).\n",
			    data_rate, num_active_planes);
		return -EINVAL;
	}

	/* MTL PM DEMAND expects the QGV BW parameter in multiples of 100 MB/s */
	new_bw_state->qgv_point_peakbw = DIV_ROUND_CLOSEST(qgv_peak_bw, 100);
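	/* e.g. an assumed qgv_peak_bw of 40533 MB/s would be stored as 405 */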

	return 0;
}

static int icl_find_qgv_points(struct intel_display *display,
			       unsigned int data_rate,
			       unsigned int num_active_planes,
			       const struct intel_bw_state *old_bw_state,
			       struct intel_bw_state *new_bw_state)
{
	unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
	unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
	u16 psf_points = 0;
	u16 qgv_points = 0;
	int i;
	int ret;

	ret = intel_atomic_lock_global_state(&new_bw_state->base);
	if (ret)
		return ret;

	for (i = 0; i < num_qgv_points; i++) {
		unsigned int max_data_rate = icl_qgv_bw(display,
							num_active_planes, i);
		if (max_data_rate >= data_rate)
			qgv_points |= BIT(i);

		drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d\n",
			    i, max_data_rate, data_rate);
	}

	for (i = 0; i < num_psf_gv_points; i++) {
		unsigned int max_data_rate = adl_psf_bw(display, i);

		if (max_data_rate >= data_rate)
			psf_points |= BIT(i);

		drm_dbg_kms(display->drm, "PSF GV point %d: max bw %d"
			    " required %d\n",
			    i, max_data_rate, data_rate);
	}

	/*
	 * BSpec states that we should always have at least one allowed point
	 * left, so if we don't, simply reject the configuration for obvious
	 * reasons.
	 */
	if (qgv_points == 0) {
		drm_dbg_kms(display->drm, "No QGV points provide sufficient memory"
			    " bandwidth %d for display configuration(%d active planes).\n",
			    data_rate, num_active_planes);
		return -EINVAL;
	}

	if (num_psf_gv_points > 0 && psf_points == 0) {
		drm_dbg_kms(display->drm, "No PSF GV points provide sufficient memory"
			    " bandwidth %d for display configuration(%d active planes).\n",
			    data_rate, num_active_planes);
		return -EINVAL;
	}

	/*
	 * Leave only a single point with the highest bandwidth if
	 * we can't enable SAGV due to the increased memory latency it may
	 * cause.
	 */
	if (!intel_bw_can_enable_sagv(display, new_bw_state)) {
		qgv_points = icl_max_bw_qgv_point_mask(display, num_active_planes);
		drm_dbg_kms(display->drm, "No SAGV, using single QGV point mask 0x%x\n",
			    qgv_points);
	}

	/*
	 * We store the ones which need to be masked as that is what PCode
	 * actually accepts as a parameter.
	 */
	new_bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display,
								    qgv_points,
								    psf_points);
	/*
	 * If the actual mask has changed we need to make sure that
	 * the commits are serialized (in case this is a nomodeset,
	 * nonblocking commit).
	 */
	if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) {
		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
		if (ret)
			return ret;
	}

	return 0;
}

static int intel_bw_check_qgv_points(struct intel_display *display,
				     const struct intel_bw_state *old_bw_state,
				     struct intel_bw_state *new_bw_state)
{
	unsigned int data_rate = intel_bw_data_rate(display, new_bw_state);
	unsigned int num_active_planes =
			intel_bw_num_active_planes(display, new_bw_state);

	data_rate = DIV_ROUND_UP(data_rate, 1000);
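	/*
	 * Convert to the MB/s units used by the deratedbw/psf_bw tables;
	 * the summed per-pipe rates are presumably in kB/s.
	 */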

	if (DISPLAY_VER(display) >= 14)
		return mtl_find_qgv_points(display, data_rate, num_active_planes,
					   new_bw_state);
	else
		return icl_find_qgv_points(display, data_rate, num_active_planes,
					   old_bw_state, new_bw_state);
}

static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		unsigned int old_data_rate =
			intel_crtc_bw_data_rate(old_crtc_state);
		unsigned int new_data_rate =
			intel_crtc_bw_data_rate(new_crtc_state);
		unsigned int old_active_planes =
			intel_crtc_bw_num_active_planes(old_crtc_state);
		unsigned int new_active_planes =
			intel_crtc_bw_num_active_planes(new_crtc_state);
		struct intel_bw_state *new_bw_state;

		/*
		 * Avoid locking the bw state when
		 * nothing significant has changed.
		 */
		if (old_data_rate == new_data_rate &&
		    old_active_planes == new_active_planes)
			continue;

		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		new_bw_state->data_rate[crtc->pipe] = new_data_rate;
		new_bw_state->num_active_planes[crtc->pipe] = new_active_planes;

		*changed = true;

		drm_dbg_kms(display->drm,
			    "[CRTC:%d:%s] data rate %u num active planes %u\n",
			    crtc->base.base.id, crtc->base.name,
			    new_bw_state->data_rate[crtc->pipe],
			    new_bw_state->num_active_planes[crtc->pipe]);
	}

	return 0;
}

static int intel_bw_modeset_checks(struct intel_atomic_state *state)
{
	const struct intel_bw_state *old_bw_state;
	struct intel_bw_state *new_bw_state;
	int ret;

	if (!intel_any_crtc_active_changed(state))
		return 0;

	new_bw_state = intel_atomic_get_bw_state(state);
	if (IS_ERR(new_bw_state))
		return PTR_ERR(new_bw_state);

	old_bw_state = intel_atomic_get_old_bw_state(state);

	new_bw_state->active_pipes =
		intel_calc_active_pipes(state, old_bw_state->active_pipes);

	ret = intel_atomic_lock_global_state(&new_bw_state->base);
	if (ret)
		return ret;

	return 0;
}

static int intel_bw_check_sagv_mask(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state;
	const struct intel_crtc_state *new_crtc_state;
	const struct intel_bw_state *old_bw_state = NULL;
	struct intel_bw_state *new_bw_state = NULL;
	struct intel_crtc *crtc;
	int ret, i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_can_enable_sagv(old_crtc_state) ==
		    intel_crtc_can_enable_sagv(new_crtc_state))
			continue;

		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		old_bw_state = intel_atomic_get_old_bw_state(state);

		if (intel_crtc_can_enable_sagv(new_crtc_state))
			new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
		else
			new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
	}

	if (!new_bw_state)
		return 0;

	if (intel_bw_can_enable_sagv(display, new_bw_state) !=
	    intel_bw_can_enable_sagv(display, old_bw_state)) {
		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
		if (ret)
			return ret;
	} else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
		ret = intel_atomic_lock_global_state(&new_bw_state->base);
		if (ret)
			return ret;
	}

	return 0;
}

int intel_bw_atomic_check(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	bool changed = false;
	struct intel_bw_state *new_bw_state;
	const struct intel_bw_state *old_bw_state;
	int ret;

	if (DISPLAY_VER(display) < 9)
		return 0;

	ret = intel_bw_modeset_checks(state);
	if (ret)
		return ret;

	ret = intel_bw_check_sagv_mask(state);
	if (ret)
		return ret;

	/* FIXME earlier gens need some checks too */
	if (DISPLAY_VER(display) < 11)
		return 0;

	ret = intel_bw_check_data_rate(state, &changed);
	if (ret)
		return ret;

	old_bw_state = intel_atomic_get_old_bw_state(state);
	new_bw_state = intel_atomic_get_new_bw_state(state);

	if (new_bw_state &&
	    intel_bw_can_enable_sagv(display, old_bw_state) !=
	    intel_bw_can_enable_sagv(display, new_bw_state))
		changed = true;

	/*
	 * If none of our inputs (data rates, number of active
	 * planes, SAGV yes/no) changed then nothing to do here.
	 */
	if (!changed)
		return 0;

	ret = intel_bw_check_qgv_points(display, old_bw_state, new_bw_state);
	if (ret)
		return ret;

	return 0;
}

static void intel_bw_crtc_update(struct intel_bw_state *bw_state,
				 const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	bw_state->data_rate[crtc->pipe] =
		intel_crtc_bw_data_rate(crtc_state);
	bw_state->num_active_planes[crtc->pipe] =
		intel_crtc_bw_num_active_planes(crtc_state);

	drm_dbg_kms(display->drm, "pipe %c data rate %u num active planes %u\n",
		    pipe_name(crtc->pipe),
		    bw_state->data_rate[crtc->pipe],
		    bw_state->num_active_planes[crtc->pipe]);
}

void intel_bw_update_hw_state(struct intel_display *display)
{
	struct intel_bw_state *bw_state =
		to_intel_bw_state(display->bw.obj.state);
	struct intel_crtc *crtc;

	if (DISPLAY_VER(display) < 9)
		return;

	bw_state->active_pipes = 0;
	bw_state->pipe_sagv_reject = 0;

	for_each_intel_crtc(display->drm, crtc) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		enum pipe pipe = crtc->pipe;

		if (crtc_state->hw.active)
			bw_state->active_pipes |= BIT(pipe);

		if (DISPLAY_VER(display) >= 11)
			intel_bw_crtc_update(bw_state, crtc_state);

		/* initially SAGV has been forced off */
		bw_state->pipe_sagv_reject |= BIT(pipe);
	}
}

void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(display->bw.obj.state);
	enum pipe pipe = crtc->pipe;

	if (DISPLAY_VER(display) < 9)
		return;

	bw_state->data_rate[pipe] = 0;
	bw_state->num_active_planes[pipe] = 0;
}

static struct intel_global_state *
intel_bw_duplicate_state(struct intel_global_obj *obj)
{
	struct intel_bw_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	return &state->base;
}

static void intel_bw_destroy_state(struct intel_global_obj *obj,
				   struct intel_global_state *state)
{
	kfree(state);
}

static const struct intel_global_state_funcs intel_bw_funcs = {
	.atomic_duplicate_state = intel_bw_duplicate_state,
	.atomic_destroy_state = intel_bw_destroy_state,
};

int intel_bw_init(struct intel_display *display)
{
	struct intel_bw_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	intel_atomic_global_obj_init(display, &display->bw.obj,
				     &state->base, &intel_bw_funcs);

	/*
	 * Limit this only if we have SAGV. From display version 14 onwards
	 * SAGV is handled through pmdemand requests.
	 */
	if (intel_has_sagv(display) && IS_DISPLAY_VER(display, 11, 13))
		icl_force_disable_sagv(display, state);

	return 0;
}

bool intel_bw_pmdemand_needs_update(struct intel_atomic_state *state)
{
	const struct intel_bw_state *new_bw_state, *old_bw_state;

	new_bw_state = intel_atomic_get_new_bw_state(state);
	old_bw_state = intel_atomic_get_old_bw_state(state);

	if (new_bw_state &&
	    new_bw_state->qgv_point_peakbw != old_bw_state->qgv_point_peakbw)
		return true;

	return false;
}

bool intel_bw_can_enable_sagv(struct intel_display *display,
			      const struct intel_bw_state *bw_state)
{
	if (DISPLAY_VER(display) < 11 &&
	    bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
		return false;

	return bw_state->pipe_sagv_reject == 0;
}

int intel_bw_qgv_point_peakbw(const struct intel_bw_state *bw_state)
{
	return bw_state->qgv_point_peakbw;
}