xref: /linux/drivers/gpu/drm/i915/display/intel_bw.c (revision f86ad0ed620cb3c91ec7d5468e93ac68d727539d)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include <drm/drm_atomic_state_helper.h>
7 
8 #include "soc/intel_dram.h"
9 
10 #include "i915_drv.h"
11 #include "i915_reg.h"
12 #include "i915_utils.h"
13 #include "intel_atomic.h"
14 #include "intel_bw.h"
15 #include "intel_cdclk.h"
16 #include "intel_display_core.h"
17 #include "intel_display_regs.h"
18 #include "intel_display_types.h"
19 #include "intel_mchbar_regs.h"
20 #include "intel_pcode.h"
21 #include "skl_watermark.h"
22 
23 /* Parameters for Qclk Geyserville (QGV) */
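/* dclk is stored in MHz; the t_* fields are the DRAM timings tRP, tRDPRE, tRC, tRAS and tRCD */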
24 struct intel_qgv_point {
25 	u16 dclk, t_rp, t_rdpre, t_rc, t_ras, t_rcd;
26 };
27 
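/* Cap the derated bandwidth to this percentage of the peak bandwidth (see the maxdebw calculations below) */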
28 #define DEPROGBWPCLIMIT		60
29 
30 struct intel_psf_gv_point {
31 	u8 clk; /* clock in multiples of 16.6666 MHz */
32 };
33 
34 struct intel_qgv_info {
35 	struct intel_qgv_point points[I915_NUM_QGV_POINTS];
36 	struct intel_psf_gv_point psf_points[I915_NUM_PSF_GV_POINTS];
37 	u8 num_points;
38 	u8 num_psf_points;
39 	u8 t_bl;
40 	u8 max_numchannels;
41 	u8 channel_width;
42 	u8 deinterleave;
43 };
44 
45 static int dg1_mchbar_read_qgv_point_info(struct intel_display *display,
46 					  struct intel_qgv_point *sp,
47 					  int point)
48 {
49 	struct drm_i915_private *i915 = to_i915(display->drm);
50 	u32 dclk_ratio, dclk_reference;
51 	u32 val;
52 
53 	val = intel_uncore_read(&i915->uncore, SA_PERF_STATUS_0_0_0_MCHBAR_PC);
54 	dclk_ratio = REG_FIELD_GET(DG1_QCLK_RATIO_MASK, val);
55 	if (val & DG1_QCLK_REFERENCE)
56 		dclk_reference = 6; /* 6 * 16.666 MHz = 100 MHz */
57 	else
58 		dclk_reference = 8; /* 8 * 16.666 MHz = 133 MHz */
59 	sp->dclk = DIV_ROUND_UP((16667 * dclk_ratio * dclk_reference) + 500, 1000);
60 
61 	val = intel_uncore_read(&i915->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
62 	if (val & DG1_GEAR_TYPE)
63 		sp->dclk *= 2;
64 
65 	if (sp->dclk == 0)
66 		return -EINVAL;
67 
68 	val = intel_uncore_read(&i915->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR);
69 	sp->t_rp = REG_FIELD_GET(DG1_DRAM_T_RP_MASK, val);
70 	sp->t_rdpre = REG_FIELD_GET(DG1_DRAM_T_RDPRE_MASK, val);
71 
72 	val = intel_uncore_read(&i915->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH);
73 	sp->t_rcd = REG_FIELD_GET(DG1_DRAM_T_RCD_MASK, val);
74 	sp->t_ras = REG_FIELD_GET(DG1_DRAM_T_RAS_MASK, val);
75 
76 	sp->t_rc = sp->t_rp + sp->t_ras;
77 
78 	return 0;
79 }
80 
81 static int icl_pcode_read_qgv_point_info(struct intel_display *display,
82 					 struct intel_qgv_point *sp,
83 					 int point)
84 {
85 	struct drm_i915_private *i915 = to_i915(display->drm);
86 	u32 val = 0, val2 = 0;
87 	u16 dclk;
88 	int ret;
89 
90 	ret = snb_pcode_read(&i915->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
91 			     ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
92 			     &val, &val2);
93 	if (ret)
94 		return ret;
95 
96 	dclk = val & 0xffff;
97 	sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(display) >= 12 ? 500 : 0),
98 				1000);
99 	sp->t_rp = (val & 0xff0000) >> 16;
100 	sp->t_rcd = (val & 0xff000000) >> 24;
101 
102 	sp->t_rdpre = val2 & 0xff;
103 	sp->t_ras = (val2 & 0xff00) >> 8;
104 
105 	sp->t_rc = sp->t_rp + sp->t_ras;
106 
107 	return 0;
108 }
109 
110 static int adls_pcode_read_psf_gv_point_info(struct intel_display *display,
111 					     struct intel_psf_gv_point *points)
112 {
113 	struct drm_i915_private *i915 = to_i915(display->drm);
114 	u32 val = 0;
115 	int ret;
116 	int i;
117 
118 	ret = snb_pcode_read(&i915->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
119 			     ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
120 	if (ret)
121 		return ret;
122 
123 	for (i = 0; i < I915_NUM_PSF_GV_POINTS; i++) {
124 		points[i].clk = val & 0xff;
125 		val >>= 8;
126 	}
127 
128 	return 0;
129 }
130 
131 static u16 icl_qgv_points_mask(struct intel_display *display)
132 {
133 	unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
134 	unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
135 	u16 qgv_points = 0, psf_points = 0;
136 
137 	/*
138 	 * We can _not_ use the whole ADLS_QGV_PT_MASK here, as PCode rejects
139 	 * the request if we try to mask any unadvertised points.
140 	 * So we must operate only on the points returned from PCode.
141 	 */
142 	if (num_qgv_points > 0)
143 		qgv_points = GENMASK(num_qgv_points - 1, 0);
144 
145 	if (num_psf_gv_points > 0)
146 		psf_points = GENMASK(num_psf_gv_points - 1, 0);
147 
148 	return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points);
149 }
150 
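/*
 * SAGV is considered disabled when the requested restriction leaves
 * exactly one QGV point enabled (the unmasked set is a power of two).
 */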
151 static bool is_sagv_enabled(struct intel_display *display, u16 points_mask)
152 {
153 	return !is_power_of_2(~points_mask & icl_qgv_points_mask(display) &
154 			      ICL_PCODE_REQ_QGV_PT_MASK);
155 }
156 
157 int icl_pcode_restrict_qgv_points(struct intel_display *display,
158 				  u32 points_mask)
159 {
160 	struct drm_i915_private *i915 = to_i915(display->drm);
161 	int ret;
162 
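	/* Display 14+ handles SAGV through pmdemand requests instead of this PCode interface */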
163 	if (DISPLAY_VER(display) >= 14)
164 		return 0;
165 
166 	/* bspec says to keep retrying for at least 1 ms */
167 	ret = skl_pcode_request(&i915->uncore, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
168 				points_mask,
169 				ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
170 				ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
171 				1);
172 
173 	if (ret < 0) {
174 		drm_err(display->drm,
175 			"Failed to disable qgv points (0x%x) points: 0x%x\n",
176 			ret, points_mask);
177 		return ret;
178 	}
179 
180 	display->sagv.status = is_sagv_enabled(display, points_mask) ?
181 		I915_SAGV_ENABLED : I915_SAGV_DISABLED;
182 
183 	return 0;
184 }
185 
186 static int mtl_read_qgv_point_info(struct intel_display *display,
187 				   struct intel_qgv_point *sp, int point)
188 {
189 	struct drm_i915_private *i915 = to_i915(display->drm);
190 	u32 val, val2;
191 	u16 dclk;
192 
193 	val = intel_uncore_read(&i915->uncore,
194 				MTL_MEM_SS_INFO_QGV_POINT_LOW(point));
195 	val2 = intel_uncore_read(&i915->uncore,
196 				 MTL_MEM_SS_INFO_QGV_POINT_HIGH(point));
197 	dclk = REG_FIELD_GET(MTL_DCLK_MASK, val);
198 	sp->dclk = DIV_ROUND_CLOSEST(16667 * dclk, 1000);
199 	sp->t_rp = REG_FIELD_GET(MTL_TRP_MASK, val);
200 	sp->t_rcd = REG_FIELD_GET(MTL_TRCD_MASK, val);
201 
202 	sp->t_rdpre = REG_FIELD_GET(MTL_TRDPRE_MASK, val2);
203 	sp->t_ras = REG_FIELD_GET(MTL_TRAS_MASK, val2);
204 
205 	sp->t_rc = sp->t_rp + sp->t_ras;
206 
207 	return 0;
208 }
209 
210 static int
211 intel_read_qgv_point_info(struct intel_display *display,
212 			  struct intel_qgv_point *sp,
213 			  int point)
214 {
215 	if (DISPLAY_VER(display) >= 14)
216 		return mtl_read_qgv_point_info(display, sp, point);
217 	else if (display->platform.dg1)
218 		return dg1_mchbar_read_qgv_point_info(display, sp, point);
219 	else
220 		return icl_pcode_read_qgv_point_info(display, sp, point);
221 }
222 
223 static int icl_get_qgv_points(struct intel_display *display,
224 			      const struct dram_info *dram_info,
225 			      struct intel_qgv_info *qi,
226 			      bool is_y_tile)
227 {
228 	int i, ret;
229 
230 	qi->num_points = dram_info->num_qgv_points;
231 	qi->num_psf_points = dram_info->num_psf_gv_points;
232 
233 	if (DISPLAY_VER(display) >= 14) {
234 		switch (dram_info->type) {
235 		case INTEL_DRAM_DDR4:
236 			qi->t_bl = 4;
237 			qi->max_numchannels = 2;
238 			qi->channel_width = 64;
239 			qi->deinterleave = 2;
240 			break;
241 		case INTEL_DRAM_DDR5:
242 			qi->t_bl = 8;
243 			qi->max_numchannels = 4;
244 			qi->channel_width = 32;
245 			qi->deinterleave = 2;
246 			break;
247 		case INTEL_DRAM_LPDDR4:
248 		case INTEL_DRAM_LPDDR5:
249 			qi->t_bl = 16;
250 			qi->max_numchannels = 8;
251 			qi->channel_width = 16;
252 			qi->deinterleave = 4;
253 			break;
254 		case INTEL_DRAM_GDDR:
255 		case INTEL_DRAM_GDDR_ECC:
256 			qi->channel_width = 32;
257 			break;
258 		default:
259 			MISSING_CASE(dram_info->type);
260 			return -EINVAL;
261 		}
262 	} else if (DISPLAY_VER(display) >= 12) {
263 		switch (dram_info->type) {
264 		case INTEL_DRAM_DDR4:
265 			qi->t_bl = is_y_tile ? 8 : 4;
266 			qi->max_numchannels = 2;
267 			qi->channel_width = 64;
268 			qi->deinterleave = is_y_tile ? 1 : 2;
269 			break;
270 		case INTEL_DRAM_DDR5:
271 			qi->t_bl = is_y_tile ? 16 : 8;
272 			qi->max_numchannels = 4;
273 			qi->channel_width = 32;
274 			qi->deinterleave = is_y_tile ? 1 : 2;
275 			break;
276 		case INTEL_DRAM_LPDDR4:
277 			if (display->platform.rocketlake) {
278 				qi->t_bl = 8;
279 				qi->max_numchannels = 4;
280 				qi->channel_width = 32;
281 				qi->deinterleave = 2;
282 				break;
283 			}
284 			fallthrough;
285 		case INTEL_DRAM_LPDDR5:
286 			qi->t_bl = 16;
287 			qi->max_numchannels = 8;
288 			qi->channel_width = 16;
289 			qi->deinterleave = is_y_tile ? 2 : 4;
290 			break;
291 		default:
292 			qi->t_bl = 16;
293 			qi->max_numchannels = 1;
294 			break;
295 		}
296 	} else if (DISPLAY_VER(display) == 11) {
297 		qi->t_bl = dram_info->type == INTEL_DRAM_DDR4 ? 4 : 8;
298 		qi->max_numchannels = 1;
299 	}
300 
301 	if (drm_WARN_ON(display->drm,
302 			qi->num_points > ARRAY_SIZE(qi->points)))
303 		qi->num_points = ARRAY_SIZE(qi->points);
304 
305 	for (i = 0; i < qi->num_points; i++) {
306 		struct intel_qgv_point *sp = &qi->points[i];
307 
308 		ret = intel_read_qgv_point_info(display, sp, i);
309 		if (ret) {
310 			drm_dbg_kms(display->drm, "Could not read QGV %d info\n", i);
311 			return ret;
312 		}
313 
314 		drm_dbg_kms(display->drm,
315 			    "QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
316 			    i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
317 			    sp->t_rcd, sp->t_rc);
318 	}
319 
320 	if (qi->num_psf_points > 0) {
321 		ret = adls_pcode_read_psf_gv_point_info(display, qi->psf_points);
322 		if (ret) {
323 			drm_err(display->drm, "Failed to read PSF point data; PSF points will not be considered in bandwidth calculations.\n");
324 			qi->num_psf_points = 0;
325 		}
326 
327 		for (i = 0; i < qi->num_psf_points; i++)
328 			drm_dbg_kms(display->drm,
329 				    "PSF GV %d: CLK=%d \n",
330 				    i, qi->psf_points[i].clk);
331 	}
332 
333 	return 0;
334 }
335 
336 static int adl_calc_psf_bw(int clk)
337 {
338 	/*
339 	 * clk is multiples of 16.666MHz (100/6)
340 	 * According to BSpec PSF GV bandwidth is
341 	 * calculated as BW = 64 * clk * 16.666Mhz
342 	 */
343 	return DIV_ROUND_CLOSEST(64 * clk * 100, 6);
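	/* e.g. clk = 18 (18 * 16.666 MHz = 300 MHz): 64 * 18 * 100 / 6 = 19200 MB/s, i.e. ~19.2 GB/s */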
344 }
345 
346 static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
347 {
348 	u16 dclk = 0;
349 	int i;
350 
351 	for (i = 0; i < qi->num_points; i++)
352 		dclk = max(dclk, qi->points[i].dclk);
353 
354 	return dclk;
355 }
356 
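/*
 * Per-platform system agent (SA) parameters used by the bandwidth
 * calculations below: deprogbwlimit is in GB/s, derating is a percentage.
 */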
357 struct intel_sa_info {
358 	u16 displayrtids;
359 	u8 deburst, deprogbwlimit, derating;
360 };
361 
362 static const struct intel_sa_info icl_sa_info = {
363 	.deburst = 8,
364 	.deprogbwlimit = 25, /* GB/s */
365 	.displayrtids = 128,
366 	.derating = 10,
367 };
368 
369 static const struct intel_sa_info tgl_sa_info = {
370 	.deburst = 16,
371 	.deprogbwlimit = 34, /* GB/s */
372 	.displayrtids = 256,
373 	.derating = 10,
374 };
375 
376 static const struct intel_sa_info rkl_sa_info = {
377 	.deburst = 8,
378 	.deprogbwlimit = 20, /* GB/s */
379 	.displayrtids = 128,
380 	.derating = 10,
381 };
382 
383 static const struct intel_sa_info adls_sa_info = {
384 	.deburst = 16,
385 	.deprogbwlimit = 38, /* GB/s */
386 	.displayrtids = 256,
387 	.derating = 10,
388 };
389 
390 static const struct intel_sa_info adlp_sa_info = {
391 	.deburst = 16,
392 	.deprogbwlimit = 38, /* GB/s */
393 	.displayrtids = 256,
394 	.derating = 20,
395 };
396 
397 static const struct intel_sa_info mtl_sa_info = {
398 	.deburst = 32,
399 	.deprogbwlimit = 38, /* GB/s */
400 	.displayrtids = 256,
401 	.derating = 10,
402 };
403 
404 static const struct intel_sa_info xe2_hpd_sa_info = {
405 	.derating = 30,
406 	.deprogbwlimit = 53,
407 	/* Other values not used by simplified algorithm */
408 };
409 
410 static const struct intel_sa_info xe2_hpd_ecc_sa_info = {
411 	.derating = 45,
412 	.deprogbwlimit = 53,
413 	/* Other values not used by simplified algorithm */
414 };
415 
416 static const struct intel_sa_info xe3lpd_sa_info = {
417 	.deburst = 32,
418 	.deprogbwlimit = 65, /* GB/s */
419 	.displayrtids = 256,
420 	.derating = 10,
421 };
422 
423 static int icl_get_bw_info(struct intel_display *display,
424 			   const struct dram_info *dram_info,
425 			   const struct intel_sa_info *sa)
426 {
427 	struct intel_qgv_info qi = {};
428 	bool is_y_tile = true; /* assume y tile may be used */
429 	int num_channels = max_t(u8, 1, dram_info->num_channels);
430 	int ipqdepth, ipqdepthpch = 16;
431 	int dclk_max;
432 	int maxdebw;
433 	int num_groups = ARRAY_SIZE(display->bw.max);
434 	int i, ret;
435 
436 	ret = icl_get_qgv_points(display, dram_info, &qi, is_y_tile);
437 	if (ret) {
438 		drm_dbg_kms(display->drm,
439 			    "Failed to get memory subsystem information, ignoring bandwidth limits");
440 		return ret;
441 	}
442 
443 	dclk_max = icl_sagv_max_dclk(&qi);
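	/* maxdebw: the DE programmable bw limit (GB/s -> MB/s) or 60% of dclk_max * 16, whichever is lower */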
444 	maxdebw = min(sa->deprogbwlimit * 1000, dclk_max * 16 * 6 / 10);
445 	ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);
446 	qi.deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
447 
448 	for (i = 0; i < num_groups; i++) {
449 		struct intel_bw_info *bi = &display->bw.max[i];
450 		int clpchgroup;
451 		int j;
452 
453 		clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;
454 		bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;
455 
456 		bi->num_qgv_points = qi.num_points;
457 		bi->num_psf_gv_points = qi.num_psf_points;
458 
459 		for (j = 0; j < qi.num_points; j++) {
460 			const struct intel_qgv_point *sp = &qi.points[j];
461 			int ct, bw;
462 
463 			/*
464 			 * Max row cycle time
465 			 *
466 			 * FIXME what is the logic behind the
467 			 * assumed burst length?
468 			 */
469 			ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
470 				   (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
471 			bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct);
472 
473 			bi->deratedbw[j] = min(maxdebw,
474 					       bw * (100 - sa->derating) / 100);
475 
476 			drm_dbg_kms(display->drm,
477 				    "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
478 				    i, j, bi->num_planes, bi->deratedbw[j]);
479 		}
480 	}
481 	/*
482 	 * If SAGV is disabled in BIOS, we always get 1
483 	 * SAGV point, but we can't send PCode commands to restrict it
484 	 * as they will fail and it would be pointless anyway.
485 	 */
486 	if (qi.num_points == 1)
487 		display->sagv.status = I915_SAGV_NOT_CONTROLLED;
488 	else
489 		display->sagv.status = I915_SAGV_ENABLED;
490 
491 	return 0;
492 }
493 
494 static int tgl_get_bw_info(struct intel_display *display,
495 			   const struct dram_info *dram_info,
496 			   const struct intel_sa_info *sa)
497 {
498 	struct intel_qgv_info qi = {};
499 	bool is_y_tile = true; /* assume y tile may be used */
500 	int num_channels = max_t(u8, 1, dram_info->num_channels);
501 	int ipqdepth, ipqdepthpch = 16;
502 	int dclk_max;
503 	int maxdebw, peakbw;
504 	int clperchgroup;
505 	int num_groups = ARRAY_SIZE(display->bw.max);
506 	int i, ret;
507 
508 	ret = icl_get_qgv_points(display, dram_info, &qi, is_y_tile);
509 	if (ret) {
510 		drm_dbg_kms(display->drm,
511 			    "Failed to get memory subsystem information, ignoring bandwidth limits");
512 		return ret;
513 	}
514 
515 	if (DISPLAY_VER(display) < 14 &&
516 	    (dram_info->type == INTEL_DRAM_LPDDR4 || dram_info->type == INTEL_DRAM_LPDDR5))
517 		num_channels *= 2;
518 
519 	qi.deinterleave = qi.deinterleave ? : DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
520 
521 	if (num_channels < qi.max_numchannels && DISPLAY_VER(display) >= 12)
522 		qi.deinterleave = max(DIV_ROUND_UP(qi.deinterleave, 2), 1);
523 
524 	if (DISPLAY_VER(display) >= 12 && num_channels > qi.max_numchannels)
525 		drm_warn(display->drm, "Number of channels exceeds max number of channels.");
526 	if (qi.max_numchannels != 0)
527 		num_channels = min_t(u8, num_channels, qi.max_numchannels);
528 
529 	dclk_max = icl_sagv_max_dclk(&qi);
530 
531 	peakbw = num_channels * DIV_ROUND_UP(qi.channel_width, 8) * dclk_max;
532 	maxdebw = min(sa->deprogbwlimit * 1000, peakbw * DEPROGBWPCLIMIT / 100);
533 
534 	ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);
535 	/*
536 	 * clperchgroup = 4kpagespermempage * clperchperblock,
537 	 * clperchperblock = 8 / num_channels * interleave
538 	 */
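	/* e.g. 2 channels with deinterleave 2: 4 * DIV_ROUND_UP(8, 2) * 2 = 32 */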
539 	clperchgroup = 4 * DIV_ROUND_UP(8, num_channels) * qi.deinterleave;
540 
541 	for (i = 0; i < num_groups; i++) {
542 		struct intel_bw_info *bi = &display->bw.max[i];
543 		struct intel_bw_info *bi_next;
544 		int clpchgroup;
545 		int j;
546 
547 		clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;
548 
549 		if (i < num_groups - 1) {
550 			bi_next = &display->bw.max[i + 1];
551 
552 			if (clpchgroup < clperchgroup)
553 				bi_next->num_planes = (ipqdepth - clpchgroup) /
554 						       clpchgroup + 1;
555 			else
556 				bi_next->num_planes = 0;
557 		}
558 
559 		bi->num_qgv_points = qi.num_points;
560 		bi->num_psf_gv_points = qi.num_psf_points;
561 
562 		for (j = 0; j < qi.num_points; j++) {
563 			const struct intel_qgv_point *sp = &qi.points[j];
564 			int ct, bw;
565 
566 			/*
567 			 * Max row cycle time
568 			 *
569 			 * FIXME what is the logic behind the
570 			 * assumed burst length?
571 			 */
572 			ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
573 				   (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
574 			bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct);
575 
576 			bi->deratedbw[j] = min(maxdebw,
577 					       bw * (100 - sa->derating) / 100);
578 			bi->peakbw[j] = DIV_ROUND_CLOSEST(sp->dclk *
579 							  num_channels *
580 							  qi.channel_width, 8);
581 
582 			drm_dbg_kms(display->drm,
583 				    "BW%d / QGV %d: num_planes=%d deratedbw=%u peakbw: %u\n",
584 				    i, j, bi->num_planes, bi->deratedbw[j],
585 				    bi->peakbw[j]);
586 		}
587 
588 		for (j = 0; j < qi.num_psf_points; j++) {
589 			const struct intel_psf_gv_point *sp = &qi.psf_points[j];
590 
591 			bi->psf_bw[j] = adl_calc_psf_bw(sp->clk);
592 
593 			drm_dbg_kms(display->drm,
594 				    "BW%d / PSF GV %d: num_planes=%d bw=%u\n",
595 				    i, j, bi->num_planes, bi->psf_bw[j]);
596 		}
597 	}
598 
599 	/*
600 	 * If SAGV is disabled in BIOS, we always get 1
601 	 * SAGV point, but we can't send PCode commands to restrict it
602 	 * as they will fail and it would be pointless anyway.
603 	 */
604 	if (qi.num_points == 1)
605 		display->sagv.status = I915_SAGV_NOT_CONTROLLED;
606 	else
607 		display->sagv.status = I915_SAGV_ENABLED;
608 
609 	return 0;
610 }
611 
612 static void dg2_get_bw_info(struct intel_display *display)
613 {
614 	unsigned int deratedbw = display->platform.dg2_g11 ? 38000 : 50000;
615 	int num_groups = ARRAY_SIZE(display->bw.max);
616 	int i;
617 
618 	/*
619 	 * DG2 doesn't have SAGV or QGV points, just a constant max bandwidth
620 	 * that doesn't depend on the number of planes enabled. So fill all the
621 	 * plane groups with constant bw information for uniformity with other
622 	 * platforms. DG2-G10 platforms have a constant 50 GB/s bandwidth,
623 	 * whereas DG2-G11 platforms have 38 GB/s.
624 	 */
625 	for (i = 0; i < num_groups; i++) {
626 		struct intel_bw_info *bi = &display->bw.max[i];
627 
628 		bi->num_planes = 1;
629 		/* Need only one dummy QGV point per group */
630 		bi->num_qgv_points = 1;
631 		bi->deratedbw[0] = deratedbw;
632 	}
633 
634 	display->sagv.status = I915_SAGV_NOT_CONTROLLED;
635 }
636 
637 static int xe2_hpd_get_bw_info(struct intel_display *display,
638 			       const struct dram_info *dram_info,
639 			       const struct intel_sa_info *sa)
640 {
641 	struct intel_qgv_info qi = {};
642 	int num_channels = dram_info->num_channels;
643 	int peakbw, maxdebw;
644 	int ret, i;
645 
646 	ret = icl_get_qgv_points(display, dram_info, &qi, true);
647 	if (ret) {
648 		drm_dbg_kms(display->drm,
649 			    "Failed to get memory subsystem information, ignoring bandwidth limits");
650 		return ret;
651 	}
652 
653 	peakbw = num_channels * qi.channel_width / 8 * icl_sagv_max_dclk(&qi);
654 	maxdebw = min(sa->deprogbwlimit * 1000, peakbw * DEPROGBWPCLIMIT / 100);
655 
656 	for (i = 0; i < qi.num_points; i++) {
657 		const struct intel_qgv_point *point = &qi.points[i];
658 		int bw = num_channels * (qi.channel_width / 8) * point->dclk;
659 
660 		display->bw.max[0].deratedbw[i] =
661 			min(maxdebw, (100 - sa->derating) * bw / 100);
662 		display->bw.max[0].peakbw[i] = bw;
663 
664 		drm_dbg_kms(display->drm, "QGV %d: deratedbw=%u peakbw: %u\n",
665 			    i, display->bw.max[0].deratedbw[i],
666 			    display->bw.max[0].peakbw[i]);
667 	}
668 
669 	/* Bandwidth does not depend on # of planes; set all groups the same */
670 	display->bw.max[0].num_planes = 1;
671 	display->bw.max[0].num_qgv_points = qi.num_points;
672 	for (i = 1; i < ARRAY_SIZE(display->bw.max); i++)
673 		memcpy(&display->bw.max[i], &display->bw.max[0],
674 		       sizeof(display->bw.max[0]));
675 
676 	/*
677 	 * Xe2_HPD should always have exactly two QGV points representing
678 	 * battery and plugged-in operation.
679 	 */
680 	drm_WARN_ON(display->drm, qi.num_points != 2);
681 	display->sagv.status = I915_SAGV_ENABLED;
682 
683 	return 0;
684 }
685 
686 static unsigned int icl_max_bw_index(struct intel_display *display,
687 				     int num_planes, int qgv_point)
688 {
689 	int i;
690 
691 	/*
692 	 * Let's return max bw for 0 planes
693 	 */
694 	num_planes = max(1, num_planes);
695 
696 	for (i = 0; i < ARRAY_SIZE(display->bw.max); i++) {
697 		const struct intel_bw_info *bi =
698 			&display->bw.max[i];
699 
700 		/*
701 		 * Pcode will not expose all QGV points when
702 		 * SAGV is forced to off/min/med/max.
703 		 */
704 		if (qgv_point >= bi->num_qgv_points)
705 			return UINT_MAX;
706 
707 		if (num_planes >= bi->num_planes)
708 			return i;
709 	}
710 
711 	return UINT_MAX;
712 }
713 
714 static unsigned int tgl_max_bw_index(struct intel_display *display,
715 				     int num_planes, int qgv_point)
716 {
717 	int i;
718 
719 	/*
720 	 * Let's return max bw for 0 planes
721 	 */
722 	num_planes = max(1, num_planes);
723 
724 	for (i = ARRAY_SIZE(display->bw.max) - 1; i >= 0; i--) {
725 		const struct intel_bw_info *bi =
726 			&display->bw.max[i];
727 
728 		/*
729 		 * Pcode will not expose all QGV points when
730 		 * SAGV is forced to off/min/med/max.
731 		 */
732 		if (qgv_point >= bi->num_qgv_points)
733 			return UINT_MAX;
734 
735 		if (num_planes <= bi->num_planes)
736 			return i;
737 	}
738 
739 	return 0;
740 }
741 
742 static unsigned int adl_psf_bw(struct intel_display *display,
743 			       int psf_gv_point)
744 {
745 	const struct intel_bw_info *bi =
746 			&display->bw.max[0];
747 
748 	return bi->psf_bw[psf_gv_point];
749 }
750 
751 static unsigned int icl_qgv_bw(struct intel_display *display,
752 			       int num_active_planes, int qgv_point)
753 {
754 	unsigned int idx;
755 
756 	if (DISPLAY_VER(display) >= 12)
757 		idx = tgl_max_bw_index(display, num_active_planes, qgv_point);
758 	else
759 		idx = icl_max_bw_index(display, num_active_planes, qgv_point);
760 
761 	if (idx >= ARRAY_SIZE(display->bw.max))
762 		return 0;
763 
764 	return display->bw.max[idx].deratedbw[qgv_point];
765 }
766 
767 void intel_bw_init_hw(struct intel_display *display)
768 {
769 	const struct dram_info *dram_info = intel_dram_info(display->drm);
770 
771 	if (!HAS_DISPLAY(display))
772 		return;
773 
774 	if (DISPLAY_VER(display) >= 30)
775 		tgl_get_bw_info(display, dram_info, &xe3lpd_sa_info);
776 	else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx &&
777 		 dram_info->type == INTEL_DRAM_GDDR_ECC)
778 		xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_ecc_sa_info);
779 	else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx)
780 		xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_sa_info);
781 	else if (DISPLAY_VER(display) >= 14)
782 		tgl_get_bw_info(display, dram_info, &mtl_sa_info);
783 	else if (display->platform.dg2)
784 		dg2_get_bw_info(display);
785 	else if (display->platform.alderlake_p)
786 		tgl_get_bw_info(display, dram_info, &adlp_sa_info);
787 	else if (display->platform.alderlake_s)
788 		tgl_get_bw_info(display, dram_info, &adls_sa_info);
789 	else if (display->platform.rocketlake)
790 		tgl_get_bw_info(display, dram_info, &rkl_sa_info);
791 	else if (DISPLAY_VER(display) == 12)
792 		tgl_get_bw_info(display, dram_info, &tgl_sa_info);
793 	else if (DISPLAY_VER(display) == 11)
794 		icl_get_bw_info(display, dram_info, &icl_sa_info);
795 }
796 
797 static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
798 {
799 	/*
800 	 * We assume cursors are small enough
801 	 * to not cause bandwidth problems.
802 	 */
803 	return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR));
804 }
805 
806 static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
807 {
808 	struct intel_display *display = to_intel_display(crtc_state);
809 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
810 	unsigned int data_rate = 0;
811 	enum plane_id plane_id;
812 
813 	for_each_plane_id_on_crtc(crtc, plane_id) {
814 		/*
815 		 * We assume cursors are small enough
816 	 * to not cause bandwidth problems.
817 		 */
818 		if (plane_id == PLANE_CURSOR)
819 			continue;
820 
821 		data_rate += crtc_state->data_rate[plane_id];
822 
823 		if (DISPLAY_VER(display) < 11)
824 			data_rate += crtc_state->data_rate_y[plane_id];
825 	}
826 
827 	return data_rate;
828 }
829 
830 /* "Maximum Pipe Read Bandwidth" */
831 static int intel_bw_crtc_min_cdclk(struct intel_display *display,
832 				   unsigned int data_rate)
833 {
834 	if (DISPLAY_VER(display) < 12)
835 		return 0;
836 
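	/* min cdclk (kHz) = data_rate / 51.2, i.e. the pipe may read at most 51.2 bytes per cdclk cycle */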
837 	return DIV_ROUND_UP_ULL(mul_u32_u32(data_rate, 10), 512);
838 }
839 
840 static unsigned int intel_bw_num_active_planes(struct intel_display *display,
841 					       const struct intel_bw_state *bw_state)
842 {
843 	unsigned int num_active_planes = 0;
844 	enum pipe pipe;
845 
846 	for_each_pipe(display, pipe)
847 		num_active_planes += bw_state->num_active_planes[pipe];
848 
849 	return num_active_planes;
850 }
851 
852 static unsigned int intel_bw_data_rate(struct intel_display *display,
853 				       const struct intel_bw_state *bw_state)
854 {
855 	struct drm_i915_private *i915 = to_i915(display->drm);
856 	unsigned int data_rate = 0;
857 	enum pipe pipe;
858 
859 	for_each_pipe(display, pipe)
860 		data_rate += bw_state->data_rate[pipe];
861 
862 	if (DISPLAY_VER(display) >= 13 && i915_vtd_active(i915))
863 		data_rate = DIV_ROUND_UP(data_rate * 105, 100);
864 
865 	return data_rate;
866 }
867 
868 struct intel_bw_state *
869 intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
870 {
871 	struct intel_display *display = to_intel_display(state);
872 	struct intel_global_state *bw_state;
873 
874 	bw_state = intel_atomic_get_old_global_obj_state(state, &display->bw.obj);
875 
876 	return to_intel_bw_state(bw_state);
877 }
878 
879 struct intel_bw_state *
880 intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
881 {
882 	struct intel_display *display = to_intel_display(state);
883 	struct intel_global_state *bw_state;
884 
885 	bw_state = intel_atomic_get_new_global_obj_state(state, &display->bw.obj);
886 
887 	return to_intel_bw_state(bw_state);
888 }
889 
890 struct intel_bw_state *
891 intel_atomic_get_bw_state(struct intel_atomic_state *state)
892 {
893 	struct intel_display *display = to_intel_display(state);
894 	struct intel_global_state *bw_state;
895 
896 	bw_state = intel_atomic_get_global_obj_state(state, &display->bw.obj);
897 	if (IS_ERR(bw_state))
898 		return ERR_CAST(bw_state);
899 
900 	return to_intel_bw_state(bw_state);
901 }
902 
903 static unsigned int icl_max_bw_qgv_point_mask(struct intel_display *display,
904 					      int num_active_planes)
905 {
906 	unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
907 	unsigned int max_bw_point = 0;
908 	unsigned int max_bw = 0;
909 	int i;
910 
911 	for (i = 0; i < num_qgv_points; i++) {
912 		unsigned int max_data_rate =
913 			icl_qgv_bw(display, num_active_planes, i);
914 
915 		/*
916 		 * We need to know which qgv point gives us
917 		 * maximum bandwidth in order to disable SAGV
918 		 * if we find that we exceed SAGV block time
919 		 * with watermarks. By that point we already
920 		 * have the watermarks, as they are calculated earlier in
921 		 * intel_atomic_check.
922 		 */
923 		if (max_data_rate > max_bw) {
924 			max_bw_point = BIT(i);
925 			max_bw = max_data_rate;
926 		}
927 	}
928 
929 	return max_bw_point;
930 }
931 
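/*
 * PCode expects the mask of points to be *disabled*, so invert the set of
 * allowed points and limit it to the points actually advertised by PCode.
 */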
932 static u16 icl_prepare_qgv_points_mask(struct intel_display *display,
933 				       unsigned int qgv_points,
934 				       unsigned int psf_points)
935 {
936 	return ~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
937 		 ADLS_PCODE_REQ_PSF_PT(psf_points)) & icl_qgv_points_mask(display);
938 }
939 
940 static unsigned int icl_max_bw_psf_gv_point_mask(struct intel_display *display)
941 {
942 	unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
943 	unsigned int max_bw_point_mask = 0;
944 	unsigned int max_bw = 0;
945 	int i;
946 
947 	for (i = 0; i < num_psf_gv_points; i++) {
948 		unsigned int max_data_rate = adl_psf_bw(display, i);
949 
950 		if (max_data_rate > max_bw) {
951 			max_bw_point_mask = BIT(i);
952 			max_bw = max_data_rate;
953 		} else if (max_data_rate == max_bw) {
954 			max_bw_point_mask |= BIT(i);
955 		}
956 	}
957 
958 	return max_bw_point_mask;
959 }
960 
961 static void icl_force_disable_sagv(struct intel_display *display,
962 				   struct intel_bw_state *bw_state)
963 {
964 	unsigned int qgv_points = icl_max_bw_qgv_point_mask(display, 0);
965 	unsigned int psf_points = icl_max_bw_psf_gv_point_mask(display);
966 
967 	bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display,
968 								qgv_points,
969 								psf_points);
970 
971 	drm_dbg_kms(display->drm, "Forcing SAGV disable: mask 0x%x\n",
972 		    bw_state->qgv_points_mask);
973 
974 	icl_pcode_restrict_qgv_points(display, bw_state->qgv_points_mask);
975 }
976 
977 static int mtl_find_qgv_points(struct intel_display *display,
978 			       unsigned int data_rate,
979 			       unsigned int num_active_planes,
980 			       struct intel_bw_state *new_bw_state)
981 {
982 	unsigned int best_rate = UINT_MAX;
983 	unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
984 	unsigned int qgv_peak_bw  = 0;
985 	int i;
986 	int ret;
987 
988 	ret = intel_atomic_lock_global_state(&new_bw_state->base);
989 	if (ret)
990 		return ret;
991 
992 	/*
993 	 * If SAGV cannot be enabled, disable the PCode SAGV by passing all 1's
994 	 * for qgv peak bw in the PM Demand request. So assign UINT_MAX if SAGV is
995 	 * not enabled. The PM Demand code will clamp the value for the register.
996 	 */
997 	if (!intel_can_enable_sagv(display, new_bw_state)) {
998 		new_bw_state->qgv_point_peakbw = U16_MAX;
999 		drm_dbg_kms(display->drm, "No SAGV, use UINT_MAX as peak bw.");
1000 		return 0;
1001 	}
1002 
1003 	/*
1004 	 * Find the best QGV point by comparing the data_rate with max data rate
1005 	 * offered per plane group
1006 	 */
1007 	for (i = 0; i < num_qgv_points; i++) {
1008 		unsigned int bw_index =
1009 			tgl_max_bw_index(display, num_active_planes, i);
1010 		unsigned int max_data_rate;
1011 
1012 		if (bw_index >= ARRAY_SIZE(display->bw.max))
1013 			continue;
1014 
1015 		max_data_rate = display->bw.max[bw_index].deratedbw[i];
1016 
1017 		if (max_data_rate < data_rate)
1018 			continue;
1019 
1020 		if (max_data_rate - data_rate < best_rate) {
1021 			best_rate = max_data_rate - data_rate;
1022 			qgv_peak_bw = display->bw.max[bw_index].peakbw[i];
1023 		}
1024 
1025 		drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d qgv_peak_bw: %d\n",
1026 			    i, max_data_rate, data_rate, qgv_peak_bw);
1027 	}
1028 
1029 	drm_dbg_kms(display->drm, "Matching peaks QGV bw: %d for required data rate: %d\n",
1030 		    qgv_peak_bw, data_rate);
1031 
1032 	/*
1033 	 * The display configuration cannot be supported if no QGV point
1034 	 * satisfying the required data rate is found.
1035 	 */
1036 	if (qgv_peak_bw == 0) {
1037 		drm_dbg_kms(display->drm, "No QGV points for bw %d for display configuration(%d active planes).\n",
1038 			    data_rate, num_active_planes);
1039 		return -EINVAL;
1040 	}
1041 
1042 	/* MTL PM DEMAND expects QGV BW parameter in multiples of 100 mbps */
1043 	new_bw_state->qgv_point_peakbw = DIV_ROUND_CLOSEST(qgv_peak_bw, 100);
1044 
1045 	return 0;
1046 }
1047 
1048 static int icl_find_qgv_points(struct intel_display *display,
1049 			       unsigned int data_rate,
1050 			       unsigned int num_active_planes,
1051 			       const struct intel_bw_state *old_bw_state,
1052 			       struct intel_bw_state *new_bw_state)
1053 {
1054 	unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
1055 	unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
1056 	u16 psf_points = 0;
1057 	u16 qgv_points = 0;
1058 	int i;
1059 	int ret;
1060 
1061 	ret = intel_atomic_lock_global_state(&new_bw_state->base);
1062 	if (ret)
1063 		return ret;
1064 
1065 	for (i = 0; i < num_qgv_points; i++) {
1066 		unsigned int max_data_rate = icl_qgv_bw(display,
1067 							num_active_planes, i);
1068 		if (max_data_rate >= data_rate)
1069 			qgv_points |= BIT(i);
1070 
1071 		drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d\n",
1072 			    i, max_data_rate, data_rate);
1073 	}
1074 
1075 	for (i = 0; i < num_psf_gv_points; i++) {
1076 		unsigned int max_data_rate = adl_psf_bw(display, i);
1077 
1078 		if (max_data_rate >= data_rate)
1079 			psf_points |= BIT(i);
1080 
1081 		drm_dbg_kms(display->drm, "PSF GV point %d: max bw %d"
1082 			    " required %d\n",
1083 			    i, max_data_rate, data_rate);
1084 	}
1085 
1086 	/*
1087 	 * BSpec states that we should always have at least one allowed point
1088 	 * left, so if we don't, simply reject the configuration for obvious
1089 	 * reasons.
1090 	 */
1091 	if (qgv_points == 0) {
1092 		drm_dbg_kms(display->drm, "No QGV points provide sufficient memory"
1093 			    " bandwidth %d for display configuration(%d active planes).\n",
1094 			    data_rate, num_active_planes);
1095 		return -EINVAL;
1096 	}
1097 
1098 	if (num_psf_gv_points > 0 && psf_points == 0) {
1099 		drm_dbg_kms(display->drm, "No PSF GV points provide sufficient memory"
1100 			    " bandwidth %d for display configuration(%d active planes).\n",
1101 			    data_rate, num_active_planes);
1102 		return -EINVAL;
1103 	}
1104 
1105 	/*
1106 	 * Leave only the single point with the highest bandwidth, if
1107 	 * we can't enable SAGV due to the increased memory latency it may
1108 	 * cause.
1109 	 */
1110 	if (!intel_can_enable_sagv(display, new_bw_state)) {
1111 		qgv_points = icl_max_bw_qgv_point_mask(display, num_active_planes);
1112 		drm_dbg_kms(display->drm, "No SAGV, using single QGV point mask 0x%x\n",
1113 			    qgv_points);
1114 	}
1115 
1116 	/*
1117 	 * We store the ones which need to be masked as that is what PCode
1118 	 * actually accepts as a parameter.
1119 	 */
1120 	new_bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display,
1121 								    qgv_points,
1122 								    psf_points);
1123 	/*
1124 	 * If the actual mask has changed we need to make sure that
1125 	 * the commits are serialized (in case this is a nomodeset, nonblocking commit).
1126 	 */
1127 	if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) {
1128 		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
1129 		if (ret)
1130 			return ret;
1131 	}
1132 
1133 	return 0;
1134 }
1135 
1136 static int intel_bw_check_qgv_points(struct intel_display *display,
1137 				     const struct intel_bw_state *old_bw_state,
1138 				     struct intel_bw_state *new_bw_state)
1139 {
1140 	unsigned int data_rate = intel_bw_data_rate(display, new_bw_state);
1141 	unsigned int num_active_planes =
1142 			intel_bw_num_active_planes(display, new_bw_state);
1143 
1144 	data_rate = DIV_ROUND_UP(data_rate, 1000);
1145 
1146 	if (DISPLAY_VER(display) >= 14)
1147 		return mtl_find_qgv_points(display, data_rate, num_active_planes,
1148 					   new_bw_state);
1149 	else
1150 		return icl_find_qgv_points(display, data_rate, num_active_planes,
1151 					   old_bw_state, new_bw_state);
1152 }
1153 
1154 static bool intel_dbuf_bw_changed(struct intel_display *display,
1155 				  const struct intel_dbuf_bw *old_dbuf_bw,
1156 				  const struct intel_dbuf_bw *new_dbuf_bw)
1157 {
1158 	enum dbuf_slice slice;
1159 
1160 	for_each_dbuf_slice(display, slice) {
1161 		if (old_dbuf_bw->max_bw[slice] != new_dbuf_bw->max_bw[slice] ||
1162 		    old_dbuf_bw->active_planes[slice] != new_dbuf_bw->active_planes[slice])
1163 			return true;
1164 	}
1165 
1166 	return false;
1167 }
1168 
1169 static bool intel_bw_state_changed(struct intel_display *display,
1170 				   const struct intel_bw_state *old_bw_state,
1171 				   const struct intel_bw_state *new_bw_state)
1172 {
1173 	enum pipe pipe;
1174 
1175 	for_each_pipe(display, pipe) {
1176 		const struct intel_dbuf_bw *old_dbuf_bw =
1177 			&old_bw_state->dbuf_bw[pipe];
1178 		const struct intel_dbuf_bw *new_dbuf_bw =
1179 			&new_bw_state->dbuf_bw[pipe];
1180 
1181 		if (intel_dbuf_bw_changed(display, old_dbuf_bw, new_dbuf_bw))
1182 			return true;
1183 
1184 		if (intel_bw_crtc_min_cdclk(display, old_bw_state->data_rate[pipe]) !=
1185 		    intel_bw_crtc_min_cdclk(display, new_bw_state->data_rate[pipe]))
1186 			return true;
1187 	}
1188 
1189 	return false;
1190 }
1191 
1192 static void skl_plane_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
1193 				   struct intel_crtc *crtc,
1194 				   enum plane_id plane_id,
1195 				   const struct skl_ddb_entry *ddb,
1196 				   unsigned int data_rate)
1197 {
1198 	struct intel_display *display = to_intel_display(crtc);
1199 	unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(display, ddb);
1200 	enum dbuf_slice slice;
1201 
1202 	/*
1203 	 * The arbiter can only really guarantee an
1204 	 * equal share of the total bw to each plane.
1205 	 */
1206 	for_each_dbuf_slice_in_mask(display, slice, dbuf_mask) {
1207 		dbuf_bw->max_bw[slice] = max(dbuf_bw->max_bw[slice], data_rate);
1208 		dbuf_bw->active_planes[slice] |= BIT(plane_id);
1209 	}
1210 }
1211 
1212 static void skl_crtc_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
1213 				  const struct intel_crtc_state *crtc_state)
1214 {
1215 	struct intel_display *display = to_intel_display(crtc_state);
1216 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1217 	enum plane_id plane_id;
1218 
1219 	memset(dbuf_bw, 0, sizeof(*dbuf_bw));
1220 
1221 	if (!crtc_state->hw.active)
1222 		return;
1223 
1224 	for_each_plane_id_on_crtc(crtc, plane_id) {
1225 		/*
1226 		 * We assume cursors are small enough
1227 		 * to not cause bandwidth problems.
1228 		 */
1229 		if (plane_id == PLANE_CURSOR)
1230 			continue;
1231 
1232 		skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
1233 				       &crtc_state->wm.skl.plane_ddb[plane_id],
1234 				       crtc_state->data_rate[plane_id]);
1235 
1236 		if (DISPLAY_VER(display) < 11)
1237 			skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
1238 					       &crtc_state->wm.skl.plane_ddb_y[plane_id],
1239 					       crtc_state->data_rate[plane_id]);
1240 	}
1241 }
1242 
1243 /* "Maximum Data Buffer Bandwidth" */
1244 static int
1245 intel_bw_dbuf_min_cdclk(struct intel_display *display,
1246 			const struct intel_bw_state *bw_state)
1247 {
1248 	unsigned int total_max_bw = 0;
1249 	enum dbuf_slice slice;
1250 
1251 	for_each_dbuf_slice(display, slice) {
1252 		int num_active_planes = 0;
1253 		unsigned int max_bw = 0;
1254 		enum pipe pipe;
1255 
1256 		/*
1257 		 * The arbiter can only really guarantee an
1258 		 * equal share of the total bw to each plane.
1259 		 */
1260 		for_each_pipe(display, pipe) {
1261 			const struct intel_dbuf_bw *dbuf_bw = &bw_state->dbuf_bw[pipe];
1262 
1263 			max_bw = max(dbuf_bw->max_bw[slice], max_bw);
1264 			num_active_planes += hweight8(dbuf_bw->active_planes[slice]);
1265 		}
1266 		max_bw *= num_active_planes;
1267 
1268 		total_max_bw = max(total_max_bw, max_bw);
1269 	}
1270 
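	/* The data buffer can deliver at most 64 bytes per cdclk cycle */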
1271 	return DIV_ROUND_UP(total_max_bw, 64);
1272 }
1273 
1274 int intel_bw_min_cdclk(struct intel_display *display,
1275 		       const struct intel_bw_state *bw_state)
1276 {
1277 	enum pipe pipe;
1278 	int min_cdclk;
1279 
1280 	min_cdclk = intel_bw_dbuf_min_cdclk(display, bw_state);
1281 
1282 	for_each_pipe(display, pipe)
1283 		min_cdclk = max(min_cdclk,
1284 				intel_bw_crtc_min_cdclk(display,
1285 							bw_state->data_rate[pipe]));
1286 
1287 	return min_cdclk;
1288 }
1289 
1290 int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
1291 			    bool *need_cdclk_calc)
1292 {
1293 	struct intel_display *display = to_intel_display(state);
1294 	struct intel_bw_state *new_bw_state = NULL;
1295 	const struct intel_bw_state *old_bw_state = NULL;
1296 	const struct intel_cdclk_state *cdclk_state;
1297 	const struct intel_crtc_state *old_crtc_state;
1298 	const struct intel_crtc_state *new_crtc_state;
1299 	int old_min_cdclk, new_min_cdclk;
1300 	struct intel_crtc *crtc;
1301 	int i;
1302 
1303 	if (DISPLAY_VER(display) < 9)
1304 		return 0;
1305 
1306 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
1307 					    new_crtc_state, i) {
1308 		struct intel_dbuf_bw old_dbuf_bw, new_dbuf_bw;
1309 
1310 		skl_crtc_calc_dbuf_bw(&old_dbuf_bw, old_crtc_state);
1311 		skl_crtc_calc_dbuf_bw(&new_dbuf_bw, new_crtc_state);
1312 
1313 		if (!intel_dbuf_bw_changed(display, &old_dbuf_bw, &new_dbuf_bw))
1314 			continue;
1315 
1316 		new_bw_state = intel_atomic_get_bw_state(state);
1317 		if (IS_ERR(new_bw_state))
1318 			return PTR_ERR(new_bw_state);
1319 
1320 		old_bw_state = intel_atomic_get_old_bw_state(state);
1321 
1322 		new_bw_state->dbuf_bw[crtc->pipe] = new_dbuf_bw;
1323 	}
1324 
1325 	if (!old_bw_state)
1326 		return 0;
1327 
1328 	if (intel_bw_state_changed(display, old_bw_state, new_bw_state)) {
1329 		int ret = intel_atomic_lock_global_state(&new_bw_state->base);
1330 		if (ret)
1331 			return ret;
1332 	}
1333 
1334 	old_min_cdclk = intel_bw_min_cdclk(display, old_bw_state);
1335 	new_min_cdclk = intel_bw_min_cdclk(display, new_bw_state);
1336 
1337 	/*
1338 	 * No need to check against the cdclk state if
1339 	 * the min cdclk doesn't increase.
1340 	 *
1341 	 * I.e. we only ever increase the cdclk due to bandwidth
1342 	 * requirements. This can reduce back and forth
1343 	 * display blinking due to constant cdclk changes.
1344 	 */
1345 	if (new_min_cdclk <= old_min_cdclk)
1346 		return 0;
1347 
1348 	cdclk_state = intel_atomic_get_cdclk_state(state);
1349 	if (IS_ERR(cdclk_state))
1350 		return PTR_ERR(cdclk_state);
1351 
1352 	/*
1353 	 * No need to recalculate the cdclk state if
1354 	 * the min cdclk doesn't increase.
1355 	 *
1356 	 * Ie. we only ever increase the cdclk due to bandwidth
1356 	 * I.e. we only ever increase the cdclk due to bandwidth
1358 	 * display blinking due to constant cdclk changes.
1359 	 */
1360 	if (new_min_cdclk <= cdclk_state->bw_min_cdclk)
1361 		return 0;
1362 
1363 	drm_dbg_kms(display->drm,
1364 		    "new bandwidth min cdclk (%d kHz) > old min cdclk (%d kHz)\n",
1365 		    new_min_cdclk, cdclk_state->bw_min_cdclk);
1366 	*need_cdclk_calc = true;
1367 
1368 	return 0;
1369 }
1370 
1371 static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed)
1372 {
1373 	struct intel_display *display = to_intel_display(state);
1374 	const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
1375 	struct intel_crtc *crtc;
1376 	int i;
1377 
1378 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
1379 					    new_crtc_state, i) {
1380 		unsigned int old_data_rate =
1381 			intel_bw_crtc_data_rate(old_crtc_state);
1382 		unsigned int new_data_rate =
1383 			intel_bw_crtc_data_rate(new_crtc_state);
1384 		unsigned int old_active_planes =
1385 			intel_bw_crtc_num_active_planes(old_crtc_state);
1386 		unsigned int new_active_planes =
1387 			intel_bw_crtc_num_active_planes(new_crtc_state);
1388 		struct intel_bw_state *new_bw_state;
1389 
1390 		/*
1391 		 * Avoid locking the bw state when
1392 		 * nothing significant has changed.
1393 		 */
1394 		if (old_data_rate == new_data_rate &&
1395 		    old_active_planes == new_active_planes)
1396 			continue;
1397 
1398 		new_bw_state = intel_atomic_get_bw_state(state);
1399 		if (IS_ERR(new_bw_state))
1400 			return PTR_ERR(new_bw_state);
1401 
1402 		new_bw_state->data_rate[crtc->pipe] = new_data_rate;
1403 		new_bw_state->num_active_planes[crtc->pipe] = new_active_planes;
1404 
1405 		*changed = true;
1406 
1407 		drm_dbg_kms(display->drm,
1408 			    "[CRTC:%d:%s] data rate %u num active planes %u\n",
1409 			    crtc->base.base.id, crtc->base.name,
1410 			    new_bw_state->data_rate[crtc->pipe],
1411 			    new_bw_state->num_active_planes[crtc->pipe]);
1412 	}
1413 
1414 	return 0;
1415 }
1416 
1417 static int intel_bw_modeset_checks(struct intel_atomic_state *state)
1418 {
1419 	struct intel_display *display = to_intel_display(state);
1420 	const struct intel_bw_state *old_bw_state;
1421 	struct intel_bw_state *new_bw_state;
1422 
1423 	if (DISPLAY_VER(display) < 9)
1424 		return 0;
1425 
1426 	new_bw_state = intel_atomic_get_bw_state(state);
1427 	if (IS_ERR(new_bw_state))
1428 		return PTR_ERR(new_bw_state);
1429 
1430 	old_bw_state = intel_atomic_get_old_bw_state(state);
1431 
1432 	new_bw_state->active_pipes =
1433 		intel_calc_active_pipes(state, old_bw_state->active_pipes);
1434 
1435 	if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
1436 		int ret;
1437 
1438 		ret = intel_atomic_lock_global_state(&new_bw_state->base);
1439 		if (ret)
1440 			return ret;
1441 	}
1442 
1443 	return 0;
1444 }
1445 
1446 static int intel_bw_check_sagv_mask(struct intel_atomic_state *state)
1447 {
1448 	struct intel_display *display = to_intel_display(state);
1449 	const struct intel_crtc_state *old_crtc_state;
1450 	const struct intel_crtc_state *new_crtc_state;
1451 	const struct intel_bw_state *old_bw_state = NULL;
1452 	struct intel_bw_state *new_bw_state = NULL;
1453 	struct intel_crtc *crtc;
1454 	int ret, i;
1455 
1456 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
1457 					    new_crtc_state, i) {
1458 		if (intel_crtc_can_enable_sagv(old_crtc_state) ==
1459 		    intel_crtc_can_enable_sagv(new_crtc_state))
1460 			continue;
1461 
1462 		new_bw_state = intel_atomic_get_bw_state(state);
1463 		if (IS_ERR(new_bw_state))
1464 			return PTR_ERR(new_bw_state);
1465 
1466 		old_bw_state = intel_atomic_get_old_bw_state(state);
1467 
1468 		if (intel_crtc_can_enable_sagv(new_crtc_state))
1469 			new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
1470 		else
1471 			new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
1472 	}
1473 
1474 	if (!new_bw_state)
1475 		return 0;
1476 
1477 	if (intel_can_enable_sagv(display, new_bw_state) !=
1478 	    intel_can_enable_sagv(display, old_bw_state)) {
1479 		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
1480 		if (ret)
1481 			return ret;
1482 	} else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
1483 		ret = intel_atomic_lock_global_state(&new_bw_state->base);
1484 		if (ret)
1485 			return ret;
1486 	}
1487 
1488 	return 0;
1489 }
1490 
1491 int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms)
1492 {
1493 	struct intel_display *display = to_intel_display(state);
1494 	bool changed = false;
1495 	struct intel_bw_state *new_bw_state;
1496 	const struct intel_bw_state *old_bw_state;
1497 	int ret;
1498 
1499 	if (DISPLAY_VER(display) < 9)
1500 		return 0;
1501 
1502 	if (any_ms) {
1503 		ret = intel_bw_modeset_checks(state);
1504 		if (ret)
1505 			return ret;
1506 	}
1507 
1508 	ret = intel_bw_check_sagv_mask(state);
1509 	if (ret)
1510 		return ret;
1511 
1512 	/* FIXME earlier gens need some checks too */
1513 	if (DISPLAY_VER(display) < 11)
1514 		return 0;
1515 
1516 	ret = intel_bw_check_data_rate(state, &changed);
1517 	if (ret)
1518 		return ret;
1519 
1520 	old_bw_state = intel_atomic_get_old_bw_state(state);
1521 	new_bw_state = intel_atomic_get_new_bw_state(state);
1522 
1523 	if (new_bw_state &&
1524 	    intel_can_enable_sagv(display, old_bw_state) !=
1525 	    intel_can_enable_sagv(display, new_bw_state))
1526 		changed = true;
1527 
1528 	/*
1529 	 * If none of our inputs (data rates, number of active
1530 	 * planes, SAGV yes/no) changed then nothing to do here.
1531 	 */
1532 	if (!changed)
1533 		return 0;
1534 
1535 	ret = intel_bw_check_qgv_points(display, old_bw_state, new_bw_state);
1536 	if (ret)
1537 		return ret;
1538 
1539 	return 0;
1540 }
1541 
1542 static void intel_bw_crtc_update(struct intel_bw_state *bw_state,
1543 				 const struct intel_crtc_state *crtc_state)
1544 {
1545 	struct intel_display *display = to_intel_display(crtc_state);
1546 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1547 
1548 	bw_state->data_rate[crtc->pipe] =
1549 		intel_bw_crtc_data_rate(crtc_state);
1550 	bw_state->num_active_planes[crtc->pipe] =
1551 		intel_bw_crtc_num_active_planes(crtc_state);
1552 
1553 	drm_dbg_kms(display->drm, "pipe %c data rate %u num active planes %u\n",
1554 		    pipe_name(crtc->pipe),
1555 		    bw_state->data_rate[crtc->pipe],
1556 		    bw_state->num_active_planes[crtc->pipe]);
1557 }
1558 
1559 void intel_bw_update_hw_state(struct intel_display *display)
1560 {
1561 	struct intel_bw_state *bw_state =
1562 		to_intel_bw_state(display->bw.obj.state);
1563 	struct intel_crtc *crtc;
1564 
1565 	if (DISPLAY_VER(display) < 9)
1566 		return;
1567 
1568 	bw_state->active_pipes = 0;
1569 	bw_state->pipe_sagv_reject = 0;
1570 
1571 	for_each_intel_crtc(display->drm, crtc) {
1572 		const struct intel_crtc_state *crtc_state =
1573 			to_intel_crtc_state(crtc->base.state);
1574 		enum pipe pipe = crtc->pipe;
1575 
1576 		if (crtc_state->hw.active)
1577 			bw_state->active_pipes |= BIT(pipe);
1578 
1579 		if (DISPLAY_VER(display) >= 11)
1580 			intel_bw_crtc_update(bw_state, crtc_state);
1581 
1582 		skl_crtc_calc_dbuf_bw(&bw_state->dbuf_bw[pipe], crtc_state);
1583 
1584 		/* initially SAGV has been forced off */
1585 		bw_state->pipe_sagv_reject |= BIT(pipe);
1586 	}
1587 }
1588 
1589 void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc)
1590 {
1591 	struct intel_display *display = to_intel_display(crtc);
1592 	struct intel_bw_state *bw_state =
1593 		to_intel_bw_state(display->bw.obj.state);
1594 	enum pipe pipe = crtc->pipe;
1595 
1596 	if (DISPLAY_VER(display) < 9)
1597 		return;
1598 
1599 	bw_state->data_rate[pipe] = 0;
1600 	bw_state->num_active_planes[pipe] = 0;
1601 	memset(&bw_state->dbuf_bw[pipe], 0, sizeof(bw_state->dbuf_bw[pipe]));
1602 }
1603 
1604 static struct intel_global_state *
1605 intel_bw_duplicate_state(struct intel_global_obj *obj)
1606 {
1607 	struct intel_bw_state *state;
1608 
1609 	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
1610 	if (!state)
1611 		return NULL;
1612 
1613 	return &state->base;
1614 }
1615 
1616 static void intel_bw_destroy_state(struct intel_global_obj *obj,
1617 				   struct intel_global_state *state)
1618 {
1619 	kfree(state);
1620 }
1621 
1622 static const struct intel_global_state_funcs intel_bw_funcs = {
1623 	.atomic_duplicate_state = intel_bw_duplicate_state,
1624 	.atomic_destroy_state = intel_bw_destroy_state,
1625 };
1626 
1627 int intel_bw_init(struct intel_display *display)
1628 {
1629 	struct intel_bw_state *state;
1630 
1631 	state = kzalloc(sizeof(*state), GFP_KERNEL);
1632 	if (!state)
1633 		return -ENOMEM;
1634 
1635 	intel_atomic_global_obj_init(display, &display->bw.obj,
1636 				     &state->base, &intel_bw_funcs);
1637 
1638 	/*
1639 	 * Limit this only if we have SAGV. And for Display version 14 onwards
1640 	 * SAGV is handled through pmdemand requests.
1641 	 */
1642 	if (intel_has_sagv(display) && IS_DISPLAY_VER(display, 11, 13))
1643 		icl_force_disable_sagv(display, state);
1644 
1645 	return 0;
1646 }
1647