1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include <drm/drm_atomic_state_helper.h>
7 #include <drm/drm_print.h>
8 
9 #include "soc/intel_dram.h"
10 
11 #include "i915_drv.h"
12 #include "i915_reg.h"
13 #include "i915_utils.h"
14 #include "intel_atomic.h"
15 #include "intel_bw.h"
16 #include "intel_cdclk.h"
17 #include "intel_display_core.h"
18 #include "intel_display_regs.h"
19 #include "intel_display_types.h"
20 #include "intel_mchbar_regs.h"
21 #include "intel_pcode.h"
22 #include "intel_uncore.h"
23 #include "skl_watermark.h"
24 
25 struct intel_dbuf_bw {
26 	unsigned int max_bw[I915_MAX_DBUF_SLICES];
27 	u8 active_planes[I915_MAX_DBUF_SLICES];
28 };
29 
30 struct intel_bw_state {
31 	struct intel_global_state base;
32 	struct intel_dbuf_bw dbuf_bw[I915_MAX_PIPES];
33 
34 	/*
35 	 * Contains a bit mask used to determine whether the
36 	 * corresponding pipe allows SAGV or not.
37 	 */
38 	u8 pipe_sagv_reject;
39 
40 	/* bitmask of active pipes */
41 	u8 active_pipes;
42 
43 	/*
44 	 * From MTL onwards, to lock a QGV point, punit expects the peak BW of
45 	 * the selected QGV point as the parameter, in multiples of 100 MB/s.
46 	 */
47 	u16 qgv_point_peakbw;
48 
49 	/*
50 	 * Current QGV points mask, which restricts
51 	 * some particular SAGV states, not to be confused
52 	 * with pipe_sagv_reject.
53 	 */
54 	u16 qgv_points_mask;
55 
56 	unsigned int data_rate[I915_MAX_PIPES];
57 	u8 num_active_planes[I915_MAX_PIPES];
58 };
59 
60 /* Parameters for Qclk Geyserville (QGV) */
61 struct intel_qgv_point {
62 	u16 dclk, t_rp, t_rdpre, t_rc, t_ras, t_rcd;
63 };
64 
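/* Percentage of the raw peak memory BW used to cap the derated display BW */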
65 #define DEPROGBWPCLIMIT		60
66 
67 struct intel_psf_gv_point {
68 	u8 clk; /* clock in multiples of 16.6666 MHz */
69 };
70 
71 struct intel_qgv_info {
72 	struct intel_qgv_point points[I915_NUM_QGV_POINTS];
73 	struct intel_psf_gv_point psf_points[I915_NUM_PSF_GV_POINTS];
74 	u8 num_points;
75 	u8 num_psf_points;
76 	u8 t_bl;
77 	u8 max_numchannels;
78 	u8 channel_width;
79 	u8 deinterleave;
80 };
81 
82 static int dg1_mchbar_read_qgv_point_info(struct intel_display *display,
83 					  struct intel_qgv_point *sp,
84 					  int point)
85 {
86 	struct drm_i915_private *i915 = to_i915(display->drm);
87 	u32 dclk_ratio, dclk_reference;
88 	u32 val;
89 
90 	val = intel_uncore_read(&i915->uncore, SA_PERF_STATUS_0_0_0_MCHBAR_PC);
91 	dclk_ratio = REG_FIELD_GET(DG1_QCLK_RATIO_MASK, val);
92 	if (val & DG1_QCLK_REFERENCE)
93 		dclk_reference = 6; /* 6 * 16.666 MHz = 100 MHz */
94 	else
95 		dclk_reference = 8; /* 8 * 16.666 MHz = 133 MHz */
96 	sp->dclk = DIV_ROUND_UP((16667 * dclk_ratio * dclk_reference) + 500, 1000);
97 
98 	val = intel_uncore_read(&i915->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
99 	if (val & DG1_GEAR_TYPE)
100 		sp->dclk *= 2;
101 
102 	if (sp->dclk == 0)
103 		return -EINVAL;
104 
105 	val = intel_uncore_read(&i915->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR);
106 	sp->t_rp = REG_FIELD_GET(DG1_DRAM_T_RP_MASK, val);
107 	sp->t_rdpre = REG_FIELD_GET(DG1_DRAM_T_RDPRE_MASK, val);
108 
109 	val = intel_uncore_read(&i915->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH);
110 	sp->t_rcd = REG_FIELD_GET(DG1_DRAM_T_RCD_MASK, val);
111 	sp->t_ras = REG_FIELD_GET(DG1_DRAM_T_RAS_MASK, val);
112 
113 	sp->t_rc = sp->t_rp + sp->t_ras;
114 
115 	return 0;
116 }
117 
118 static int icl_pcode_read_qgv_point_info(struct intel_display *display,
119 					 struct intel_qgv_point *sp,
120 					 int point)
121 {
122 	u32 val = 0, val2 = 0;
123 	u16 dclk;
124 	int ret;
125 
126 	ret = intel_pcode_read(display->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
127 			       ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
128 			       &val, &val2);
129 	if (ret)
130 		return ret;
131 
132 	dclk = val & 0xffff;
133 	sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(display) >= 12 ? 500 : 0),
134 				1000);
135 	sp->t_rp = (val & 0xff0000) >> 16;
136 	sp->t_rcd = (val & 0xff000000) >> 24;
137 
138 	sp->t_rdpre = val2 & 0xff;
139 	sp->t_ras = (val2 & 0xff00) >> 8;
140 
141 	sp->t_rc = sp->t_rp + sp->t_ras;
142 
143 	return 0;
144 }
145 
146 static int adls_pcode_read_psf_gv_point_info(struct intel_display *display,
147 					     struct intel_psf_gv_point *points)
148 {
149 	u32 val = 0;
150 	int ret;
151 	int i;
152 
153 	ret = intel_pcode_read(display->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
154 			       ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
155 	if (ret)
156 		return ret;
157 
158 	for (i = 0; i < I915_NUM_PSF_GV_POINTS; i++) {
159 		points[i].clk = val & 0xff;
160 		val >>= 8;
161 	}
162 
163 	return 0;
164 }
165 
166 static u16 icl_qgv_points_mask(struct intel_display *display)
167 {
168 	unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
169 	unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
170 	u16 qgv_points = 0, psf_points = 0;
171 
172 	/*
173 	 * We can _not_ use the whole ADLS_QGV_PT_MASK here, as PCode rejects
174 	 * the request if we try to mask any unadvertised points.
175 	 * So we need to operate only with those returned by PCode.
176 	 */
177 	if (num_qgv_points > 0)
178 		qgv_points = GENMASK(num_qgv_points - 1, 0);
179 
180 	if (num_psf_gv_points > 0)
181 		psf_points = GENMASK(num_psf_gv_points - 1, 0);
182 
183 	return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points);
184 }
185 
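/*
 * SAGV is considered disabled if the requested restriction would leave
 * exactly one QGV point enabled; with multiple points still unmasked the
 * Punit can keep switching between them, i.e. SAGV stays enabled.
 */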
186 static bool is_sagv_enabled(struct intel_display *display, u16 points_mask)
187 {
188 	return !is_power_of_2(~points_mask & icl_qgv_points_mask(display) &
189 			      ICL_PCODE_REQ_QGV_PT_MASK);
190 }
191 
192 static int icl_pcode_restrict_qgv_points(struct intel_display *display,
193 					 u32 points_mask)
194 {
195 	int ret;
196 
197 	if (DISPLAY_VER(display) >= 14)
198 		return 0;
199 
200 	/* bspec says to keep retrying for at least 1 ms */
201 	ret = intel_pcode_request(display->drm, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
202 				  points_mask,
203 				  ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
204 				  ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
205 				  1);
206 
207 	if (ret < 0) {
208 		drm_err(display->drm,
209 			"Failed to disable qgv points (0x%x) points: 0x%x\n",
210 			ret, points_mask);
211 		return ret;
212 	}
213 
214 	display->sagv.status = is_sagv_enabled(display, points_mask) ?
215 		I915_SAGV_ENABLED : I915_SAGV_DISABLED;
216 
217 	return 0;
218 }
219 
220 static int mtl_read_qgv_point_info(struct intel_display *display,
221 				   struct intel_qgv_point *sp, int point)
222 {
223 	struct drm_i915_private *i915 = to_i915(display->drm);
224 	u32 val, val2;
225 	u16 dclk;
226 
227 	val = intel_uncore_read(&i915->uncore,
228 				MTL_MEM_SS_INFO_QGV_POINT_LOW(point));
229 	val2 = intel_uncore_read(&i915->uncore,
230 				 MTL_MEM_SS_INFO_QGV_POINT_HIGH(point));
231 	dclk = REG_FIELD_GET(MTL_DCLK_MASK, val);
232 	sp->dclk = DIV_ROUND_CLOSEST(16667 * dclk, 1000);
233 	sp->t_rp = REG_FIELD_GET(MTL_TRP_MASK, val);
234 	sp->t_rcd = REG_FIELD_GET(MTL_TRCD_MASK, val);
235 
236 	sp->t_rdpre = REG_FIELD_GET(MTL_TRDPRE_MASK, val2);
237 	sp->t_ras = REG_FIELD_GET(MTL_TRAS_MASK, val2);
238 
239 	sp->t_rc = sp->t_rp + sp->t_ras;
240 
241 	return 0;
242 }
243 
244 static int
245 intel_read_qgv_point_info(struct intel_display *display,
246 			  struct intel_qgv_point *sp,
247 			  int point)
248 {
249 	if (DISPLAY_VER(display) >= 14)
250 		return mtl_read_qgv_point_info(display, sp, point);
251 	else if (display->platform.dg1)
252 		return dg1_mchbar_read_qgv_point_info(display, sp, point);
253 	else
254 		return icl_pcode_read_qgv_point_info(display, sp, point);
255 }
256 
257 static int icl_get_qgv_points(struct intel_display *display,
258 			      const struct dram_info *dram_info,
259 			      struct intel_qgv_info *qi,
260 			      bool is_y_tile)
261 {
262 	int i, ret;
263 
264 	qi->num_points = dram_info->num_qgv_points;
265 	qi->num_psf_points = dram_info->num_psf_gv_points;
266 
267 	if (DISPLAY_VER(display) >= 14) {
268 		switch (dram_info->type) {
269 		case INTEL_DRAM_DDR4:
270 			qi->t_bl = 4;
271 			qi->max_numchannels = 2;
272 			qi->channel_width = 64;
273 			qi->deinterleave = 2;
274 			break;
275 		case INTEL_DRAM_DDR5:
276 			qi->t_bl = 8;
277 			qi->max_numchannels = 4;
278 			qi->channel_width = 32;
279 			qi->deinterleave = 2;
280 			break;
281 		case INTEL_DRAM_LPDDR4:
282 		case INTEL_DRAM_LPDDR5:
283 			qi->t_bl = 16;
284 			qi->max_numchannels = 8;
285 			qi->channel_width = 16;
286 			qi->deinterleave = 4;
287 			break;
288 		case INTEL_DRAM_GDDR:
289 		case INTEL_DRAM_GDDR_ECC:
290 			qi->channel_width = 32;
291 			break;
292 		default:
293 			MISSING_CASE(dram_info->type);
294 			return -EINVAL;
295 		}
296 	} else if (DISPLAY_VER(display) >= 12) {
297 		switch (dram_info->type) {
298 		case INTEL_DRAM_DDR4:
299 			qi->t_bl = is_y_tile ? 8 : 4;
300 			qi->max_numchannels = 2;
301 			qi->channel_width = 64;
302 			qi->deinterleave = is_y_tile ? 1 : 2;
303 			break;
304 		case INTEL_DRAM_DDR5:
305 			qi->t_bl = is_y_tile ? 16 : 8;
306 			qi->max_numchannels = 4;
307 			qi->channel_width = 32;
308 			qi->deinterleave = is_y_tile ? 1 : 2;
309 			break;
310 		case INTEL_DRAM_LPDDR4:
311 			if (display->platform.rocketlake) {
312 				qi->t_bl = 8;
313 				qi->max_numchannels = 4;
314 				qi->channel_width = 32;
315 				qi->deinterleave = 2;
316 				break;
317 			}
318 			fallthrough;
319 		case INTEL_DRAM_LPDDR5:
320 			qi->t_bl = 16;
321 			qi->max_numchannels = 8;
322 			qi->channel_width = 16;
323 			qi->deinterleave = is_y_tile ? 2 : 4;
324 			break;
325 		default:
326 			qi->t_bl = 16;
327 			qi->max_numchannels = 1;
328 			break;
329 		}
330 	} else if (DISPLAY_VER(display) == 11) {
331 		qi->t_bl = dram_info->type == INTEL_DRAM_DDR4 ? 4 : 8;
332 		qi->max_numchannels = 1;
333 	}
334 
335 	if (drm_WARN_ON(display->drm,
336 			qi->num_points > ARRAY_SIZE(qi->points)))
337 		qi->num_points = ARRAY_SIZE(qi->points);
338 
339 	for (i = 0; i < qi->num_points; i++) {
340 		struct intel_qgv_point *sp = &qi->points[i];
341 
342 		ret = intel_read_qgv_point_info(display, sp, i);
343 		if (ret) {
344 			drm_dbg_kms(display->drm, "Could not read QGV %d info\n", i);
345 			return ret;
346 		}
347 
348 		drm_dbg_kms(display->drm,
349 			    "QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
350 			    i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
351 			    sp->t_rcd, sp->t_rc);
352 	}
353 
354 	if (qi->num_psf_points > 0) {
355 		ret = adls_pcode_read_psf_gv_point_info(display, qi->psf_points);
356 		if (ret) {
357 			drm_err(display->drm, "Failed to read PSF point data; PSF points will not be considered in bandwidth calculations.\n");
358 			qi->num_psf_points = 0;
359 		}
360 
361 		for (i = 0; i < qi->num_psf_points; i++)
362 			drm_dbg_kms(display->drm,
363 				    "PSF GV %d: CLK=%d\n",
364 				    i, qi->psf_points[i].clk);
365 	}
366 
367 	return 0;
368 }
369 
370 static int adl_calc_psf_bw(int clk)
371 {
372 	/*
373 	 * clk is multiples of 16.666MHz (100/6)
374 	 * According to BSpec PSF GV bandwidth is
375 	 * calculated as BW = 64 * clk * 16.666Mhz
376 	 */
377 	return DIV_ROUND_CLOSEST(64 * clk * 100, 6);
378 }
379 
380 static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
381 {
382 	u16 dclk = 0;
383 	int i;
384 
385 	for (i = 0; i < qi->num_points; i++)
386 		dclk = max(dclk, qi->points[i].dclk);
387 
388 	return dclk;
389 }
390 
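/*
 * Per-platform system agent parameters used by the BW algorithms
 * (deprogbwlimit in GB/s, derating in percent).
 */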
391 struct intel_sa_info {
392 	u16 displayrtids;
393 	u8 deburst, deprogbwlimit, derating;
394 };
395 
396 static const struct intel_sa_info icl_sa_info = {
397 	.deburst = 8,
398 	.deprogbwlimit = 25, /* GB/s */
399 	.displayrtids = 128,
400 	.derating = 10,
401 };
402 
403 static const struct intel_sa_info tgl_sa_info = {
404 	.deburst = 16,
405 	.deprogbwlimit = 34, /* GB/s */
406 	.displayrtids = 256,
407 	.derating = 10,
408 };
409 
410 static const struct intel_sa_info rkl_sa_info = {
411 	.deburst = 8,
412 	.deprogbwlimit = 20, /* GB/s */
413 	.displayrtids = 128,
414 	.derating = 10,
415 };
416 
417 static const struct intel_sa_info adls_sa_info = {
418 	.deburst = 16,
419 	.deprogbwlimit = 38, /* GB/s */
420 	.displayrtids = 256,
421 	.derating = 10,
422 };
423 
424 static const struct intel_sa_info adlp_sa_info = {
425 	.deburst = 16,
426 	.deprogbwlimit = 38, /* GB/s */
427 	.displayrtids = 256,
428 	.derating = 20,
429 };
430 
431 static const struct intel_sa_info mtl_sa_info = {
432 	.deburst = 32,
433 	.deprogbwlimit = 38, /* GB/s */
434 	.displayrtids = 256,
435 	.derating = 10,
436 };
437 
438 static const struct intel_sa_info xe2_hpd_sa_info = {
439 	.derating = 30,
440 	.deprogbwlimit = 53,
441 	/* Other values not used by simplified algorithm */
442 };
443 
444 static const struct intel_sa_info xe2_hpd_ecc_sa_info = {
445 	.derating = 45,
446 	.deprogbwlimit = 53,
447 	/* Other values not used by simplified algorithm */
448 };
449 
450 static const struct intel_sa_info xe3lpd_sa_info = {
451 	.deburst = 32,
452 	.deprogbwlimit = 65, /* GB/s */
453 	.displayrtids = 256,
454 	.derating = 10,
455 };
456 
457 static const struct intel_sa_info xe3lpd_3002_sa_info = {
458 	.deburst = 32,
459 	.deprogbwlimit = 22, /* GB/s */
460 	.displayrtids = 256,
461 	.derating = 10,
462 };
463 
464 static int icl_get_bw_info(struct intel_display *display,
465 			   const struct dram_info *dram_info,
466 			   const struct intel_sa_info *sa)
467 {
468 	struct intel_qgv_info qi = {};
469 	bool is_y_tile = true; /* assume y tile may be used */
470 	int num_channels = max_t(u8, 1, dram_info->num_channels);
471 	int ipqdepth, ipqdepthpch = 16;
472 	int dclk_max;
473 	int maxdebw;
474 	int num_groups = ARRAY_SIZE(display->bw.max);
475 	int i, ret;
476 
477 	ret = icl_get_qgv_points(display, dram_info, &qi, is_y_tile);
478 	if (ret) {
479 		drm_dbg_kms(display->drm,
480 			    "Failed to get memory subsystem information, ignoring bandwidth limits");
481 		return ret;
482 	}
483 
484 	dclk_max = icl_sagv_max_dclk(&qi);
485 	maxdebw = min(sa->deprogbwlimit * 1000, dclk_max * 16 * 6 / 10);
486 	ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);
487 	qi.deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
488 
489 	for (i = 0; i < num_groups; i++) {
490 		struct intel_bw_info *bi = &display->bw.max[i];
491 		int clpchgroup;
492 		int j;
493 
494 		clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;
495 		bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;
496 
497 		bi->num_qgv_points = qi.num_points;
498 		bi->num_psf_gv_points = qi.num_psf_points;
499 
500 		for (j = 0; j < qi.num_points; j++) {
501 			const struct intel_qgv_point *sp = &qi.points[j];
502 			int ct, bw;
503 
504 			/*
505 			 * Max row cycle time
506 			 *
507 			 * FIXME what is the logic behind the
508 			 * assumed burst length?
509 			 */
510 			ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
511 				   (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
512 			bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct);
513 
514 			bi->deratedbw[j] = min(maxdebw,
515 					       bw * (100 - sa->derating) / 100);
516 
517 			drm_dbg_kms(display->drm,
518 				    "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
519 				    i, j, bi->num_planes, bi->deratedbw[j]);
520 		}
521 	}
522 	/*
523 	 * If SAGV is disabled in BIOS, we always get 1
524 	 * QGV point, but we can't send PCode commands to restrict it
525 	 * as they would fail and be pointless anyway.
526 	 */
527 	if (qi.num_points == 1)
528 		display->sagv.status = I915_SAGV_NOT_CONTROLLED;
529 	else
530 		display->sagv.status = I915_SAGV_ENABLED;
531 
532 	return 0;
533 }
534 
535 static int tgl_get_bw_info(struct intel_display *display,
536 			   const struct dram_info *dram_info,
537 			   const struct intel_sa_info *sa)
538 {
539 	struct intel_qgv_info qi = {};
540 	bool is_y_tile = true; /* assume y tile may be used */
541 	int num_channels = max_t(u8, 1, dram_info->num_channels);
542 	int ipqdepth, ipqdepthpch = 16;
543 	int dclk_max;
544 	int maxdebw, peakbw;
545 	int clperchgroup;
546 	int num_groups = ARRAY_SIZE(display->bw.max);
547 	int i, ret;
548 
549 	ret = icl_get_qgv_points(display, dram_info, &qi, is_y_tile);
550 	if (ret) {
551 		drm_dbg_kms(display->drm,
552 			    "Failed to get memory subsystem information, ignoring bandwidth limits");
553 		return ret;
554 	}
555 
556 	if (DISPLAY_VER(display) < 14 &&
557 	    (dram_info->type == INTEL_DRAM_LPDDR4 || dram_info->type == INTEL_DRAM_LPDDR5))
558 		num_channels *= 2;
559 
560 	qi.deinterleave = qi.deinterleave ? : DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);
561 
562 	if (num_channels < qi.max_numchannels && DISPLAY_VER(display) >= 12)
563 		qi.deinterleave = max(DIV_ROUND_UP(qi.deinterleave, 2), 1);
564 
565 	if (DISPLAY_VER(display) >= 12 && num_channels > qi.max_numchannels)
566 		drm_warn(display->drm, "Number of channels exceeds max number of channels.");
567 	if (qi.max_numchannels != 0)
568 		num_channels = min_t(u8, num_channels, qi.max_numchannels);
569 
570 	dclk_max = icl_sagv_max_dclk(&qi);
571 
572 	peakbw = num_channels * DIV_ROUND_UP(qi.channel_width, 8) * dclk_max;
573 	maxdebw = min(sa->deprogbwlimit * 1000, peakbw * DEPROGBWPCLIMIT / 100);
574 
575 	ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);
576 	/*
577 	 * clperchgroup = 4kpagespermempage * clperchperblock,
578 	 * clperchperblock = 8 / num_channels * interleave
579 	 */
580 	clperchgroup = 4 * DIV_ROUND_UP(8, num_channels) * qi.deinterleave;
581 
582 	for (i = 0; i < num_groups; i++) {
583 		struct intel_bw_info *bi = &display->bw.max[i];
584 		struct intel_bw_info *bi_next;
585 		int clpchgroup;
586 		int j;
587 
588 		clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;
589 
590 		if (i < num_groups - 1) {
591 			bi_next = &display->bw.max[i + 1];
592 
593 			if (clpchgroup < clperchgroup)
594 				bi_next->num_planes = (ipqdepth - clpchgroup) /
595 						       clpchgroup + 1;
596 			else
597 				bi_next->num_planes = 0;
598 		}
599 
600 		bi->num_qgv_points = qi.num_points;
601 		bi->num_psf_gv_points = qi.num_psf_points;
602 
603 		for (j = 0; j < qi.num_points; j++) {
604 			const struct intel_qgv_point *sp = &qi.points[j];
605 			int ct, bw;
606 
607 			/*
608 			 * Max row cycle time
609 			 *
610 			 * FIXME what is the logic behind the
611 			 * assumed burst length?
612 			 */
613 			ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
614 				   (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
615 			bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct);
616 
617 			bi->deratedbw[j] = min(maxdebw,
618 					       bw * (100 - sa->derating) / 100);
619 			bi->peakbw[j] = DIV_ROUND_CLOSEST(sp->dclk *
620 							  num_channels *
621 							  qi.channel_width, 8);
622 
623 			drm_dbg_kms(display->drm,
624 				    "BW%d / QGV %d: num_planes=%d deratedbw=%u peakbw: %u\n",
625 				    i, j, bi->num_planes, bi->deratedbw[j],
626 				    bi->peakbw[j]);
627 		}
628 
629 		for (j = 0; j < qi.num_psf_points; j++) {
630 			const struct intel_psf_gv_point *sp = &qi.psf_points[j];
631 
632 			bi->psf_bw[j] = adl_calc_psf_bw(sp->clk);
633 
634 			drm_dbg_kms(display->drm,
635 				    "BW%d / PSF GV %d: num_planes=%d bw=%u\n",
636 				    i, j, bi->num_planes, bi->psf_bw[j]);
637 		}
638 	}
639 
640 	/*
641 	 * If SAGV is disabled in BIOS, we always get 1
642 	 * QGV point, but we can't send PCode commands to restrict it
643 	 * as they would fail and be pointless anyway.
644 	 */
645 	if (qi.num_points == 1)
646 		display->sagv.status = I915_SAGV_NOT_CONTROLLED;
647 	else
648 		display->sagv.status = I915_SAGV_ENABLED;
649 
650 	return 0;
651 }
652 
653 static void dg2_get_bw_info(struct intel_display *display)
654 {
655 	unsigned int deratedbw = display->platform.dg2_g11 ? 38000 : 50000;
656 	int num_groups = ARRAY_SIZE(display->bw.max);
657 	int i;
658 
659 	/*
660 	 * DG2 doesn't have SAGV or QGV points, just a constant max bandwidth
661 	 * that doesn't depend on the number of planes enabled. So fill all the
662 	 * plane groups with constant bw information for uniformity with other
663 	 * platforms. DG2-G10 platforms have a constant 50 GB/s bandwidth,
664 	 * whereas DG2-G11 platforms have 38 GB/s.
665 	 */
666 	for (i = 0; i < num_groups; i++) {
667 		struct intel_bw_info *bi = &display->bw.max[i];
668 
669 		bi->num_planes = 1;
670 		/* Need only one dummy QGV point per group */
671 		bi->num_qgv_points = 1;
672 		bi->deratedbw[0] = deratedbw;
673 	}
674 
675 	display->sagv.status = I915_SAGV_NOT_CONTROLLED;
676 }
677 
678 static int xe2_hpd_get_bw_info(struct intel_display *display,
679 			       const struct dram_info *dram_info,
680 			       const struct intel_sa_info *sa)
681 {
682 	struct intel_qgv_info qi = {};
683 	int num_channels = dram_info->num_channels;
684 	int peakbw, maxdebw;
685 	int ret, i;
686 
687 	ret = icl_get_qgv_points(display, dram_info, &qi, true);
688 	if (ret) {
689 		drm_dbg_kms(display->drm,
690 			    "Failed to get memory subsystem information, ignoring bandwidth limits");
691 		return ret;
692 	}
693 
694 	peakbw = num_channels * qi.channel_width / 8 * icl_sagv_max_dclk(&qi);
695 	maxdebw = min(sa->deprogbwlimit * 1000, peakbw * DEPROGBWPCLIMIT / 100);
696 
697 	for (i = 0; i < qi.num_points; i++) {
698 		const struct intel_qgv_point *point = &qi.points[i];
699 		int bw = num_channels * (qi.channel_width / 8) * point->dclk;
700 
701 		display->bw.max[0].deratedbw[i] =
702 			min(maxdebw, (100 - sa->derating) * bw / 100);
703 		display->bw.max[0].peakbw[i] = bw;
704 
705 		drm_dbg_kms(display->drm, "QGV %d: deratedbw=%u peakbw: %u\n",
706 			    i, display->bw.max[0].deratedbw[i],
707 			    display->bw.max[0].peakbw[i]);
708 	}
709 
710 	/* Bandwidth does not depend on # of planes; set all groups the same */
711 	display->bw.max[0].num_planes = 1;
712 	display->bw.max[0].num_qgv_points = qi.num_points;
713 	for (i = 1; i < ARRAY_SIZE(display->bw.max); i++)
714 		memcpy(&display->bw.max[i], &display->bw.max[0],
715 		       sizeof(display->bw.max[0]));
716 
717 	/*
718 	 * Xe2_HPD should always have exactly two QGV points representing
719 	 * battery and plugged-in operation.
720 	 */
721 	drm_WARN_ON(display->drm, qi.num_points != 2);
722 	display->sagv.status = I915_SAGV_ENABLED;
723 
724 	return 0;
725 }
726 
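/*
 * Find the index into display->bw.max[] for the given number of active
 * planes and QGV point, or UINT_MAX if the point isn't available.
 */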
727 static unsigned int icl_max_bw_index(struct intel_display *display,
728 				     int num_planes, int qgv_point)
729 {
730 	int i;
731 
732 	/*
733 	 * Let's return max bw for 0 planes
734 	 */
735 	num_planes = max(1, num_planes);
736 
737 	for (i = 0; i < ARRAY_SIZE(display->bw.max); i++) {
738 		const struct intel_bw_info *bi =
739 			&display->bw.max[i];
740 
741 		/*
742 		 * Pcode will not expose all QGV points when
743 		 * SAGV is forced to off/min/med/max.
744 		 */
745 		if (qgv_point >= bi->num_qgv_points)
746 			return UINT_MAX;
747 
748 		if (num_planes >= bi->num_planes)
749 			return i;
750 	}
751 
752 	return UINT_MAX;
753 }
754 
755 static unsigned int tgl_max_bw_index(struct intel_display *display,
756 				     int num_planes, int qgv_point)
757 {
758 	int i;
759 
760 	/*
761 	 * Let's return max bw for 0 planes
762 	 */
763 	num_planes = max(1, num_planes);
764 
765 	for (i = ARRAY_SIZE(display->bw.max) - 1; i >= 0; i--) {
766 		const struct intel_bw_info *bi =
767 			&display->bw.max[i];
768 
769 		/*
770 		 * Pcode will not expose all QGV points when
771 		 * SAGV is forced to off/min/med/max.
772 		 */
773 		if (qgv_point >= bi->num_qgv_points)
774 			return UINT_MAX;
775 
776 		if (num_planes <= bi->num_planes)
777 			return i;
778 	}
779 
780 	return 0;
781 }
782 
783 static unsigned int adl_psf_bw(struct intel_display *display,
784 			       int psf_gv_point)
785 {
786 	const struct intel_bw_info *bi =
787 			&display->bw.max[0];
788 
789 	return bi->psf_bw[psf_gv_point];
790 }
791 
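/*
 * Return the derated memory bandwidth available at the given QGV point for
 * the given number of active planes (0 planes is treated as 1), or 0 if the
 * point isn't available.
 */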
792 static unsigned int icl_qgv_bw(struct intel_display *display,
793 			       int num_active_planes, int qgv_point)
794 {
795 	unsigned int idx;
796 
797 	if (DISPLAY_VER(display) >= 12)
798 		idx = tgl_max_bw_index(display, num_active_planes, qgv_point);
799 	else
800 		idx = icl_max_bw_index(display, num_active_planes, qgv_point);
801 
802 	if (idx >= ARRAY_SIZE(display->bw.max))
803 		return 0;
804 
805 	return display->bw.max[idx].deratedbw[qgv_point];
806 }
807 
808 void intel_bw_init_hw(struct intel_display *display)
809 {
810 	const struct dram_info *dram_info = intel_dram_info(display->drm);
811 
812 	if (!HAS_DISPLAY(display))
813 		return;
814 
815 	if (DISPLAY_VERx100(display) >= 3002)
816 		tgl_get_bw_info(display, dram_info, &xe3lpd_3002_sa_info);
817 	else if (DISPLAY_VER(display) >= 30)
818 		tgl_get_bw_info(display, dram_info, &xe3lpd_sa_info);
819 	else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx &&
820 		 dram_info->type == INTEL_DRAM_GDDR_ECC)
821 		xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_ecc_sa_info);
822 	else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx)
823 		xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_sa_info);
824 	else if (DISPLAY_VER(display) >= 14)
825 		tgl_get_bw_info(display, dram_info, &mtl_sa_info);
826 	else if (display->platform.dg2)
827 		dg2_get_bw_info(display);
828 	else if (display->platform.alderlake_p)
829 		tgl_get_bw_info(display, dram_info, &adlp_sa_info);
830 	else if (display->platform.alderlake_s)
831 		tgl_get_bw_info(display, dram_info, &adls_sa_info);
832 	else if (display->platform.rocketlake)
833 		tgl_get_bw_info(display, dram_info, &rkl_sa_info);
834 	else if (DISPLAY_VER(display) == 12)
835 		tgl_get_bw_info(display, dram_info, &tgl_sa_info);
836 	else if (DISPLAY_VER(display) == 11)
837 		icl_get_bw_info(display, dram_info, &icl_sa_info);
838 }
839 
840 static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state)
841 {
842 	/*
843 	 * We assume cursors are small enough
844 	 * to not cause bandwidth problems.
845 	 */
846 	return hweight8(crtc_state->active_planes & ~BIT(PLANE_CURSOR));
847 }
848 
849 static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state)
850 {
851 	struct intel_display *display = to_intel_display(crtc_state);
852 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
853 	unsigned int data_rate = 0;
854 	enum plane_id plane_id;
855 
856 	for_each_plane_id_on_crtc(crtc, plane_id) {
857 		/*
858 		 * We assume cursors are small enough
859 		 * to not cause bandwidth problems.
860 		 */
861 		if (plane_id == PLANE_CURSOR)
862 			continue;
863 
864 		data_rate += crtc_state->data_rate[plane_id];
865 
866 		if (DISPLAY_VER(display) < 11)
867 			data_rate += crtc_state->data_rate_y[plane_id];
868 	}
869 
870 	return data_rate;
871 }
872 
873 /* "Maximum Pipe Read Bandwidth" */
874 static int intel_bw_crtc_min_cdclk(struct intel_display *display,
875 				   unsigned int data_rate)
876 {
877 	if (DISPLAY_VER(display) < 12)
878 		return 0;
879 
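	/*
	 * Assuming data_rate is in kB/s and cdclk in kHz, this allows the
	 * pipe to read at most ~51.2 bytes per CDCLK cycle.
	 */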
880 	return DIV_ROUND_UP_ULL(mul_u32_u32(data_rate, 10), 512);
881 }
882 
883 static unsigned int intel_bw_num_active_planes(struct intel_display *display,
884 					       const struct intel_bw_state *bw_state)
885 {
886 	unsigned int num_active_planes = 0;
887 	enum pipe pipe;
888 
889 	for_each_pipe(display, pipe)
890 		num_active_planes += bw_state->num_active_planes[pipe];
891 
892 	return num_active_planes;
893 }
894 
895 static unsigned int intel_bw_data_rate(struct intel_display *display,
896 				       const struct intel_bw_state *bw_state)
897 {
898 	struct drm_i915_private *i915 = to_i915(display->drm);
899 	unsigned int data_rate = 0;
900 	enum pipe pipe;
901 
902 	for_each_pipe(display, pipe)
903 		data_rate += bw_state->data_rate[pipe];
904 
905 	if (DISPLAY_VER(display) >= 13 && i915_vtd_active(i915))
906 		data_rate = DIV_ROUND_UP(data_rate * 105, 100);
907 
908 	return data_rate;
909 }
910 
911 struct intel_bw_state *to_intel_bw_state(struct intel_global_state *obj_state)
912 {
913 	return container_of(obj_state, struct intel_bw_state, base);
914 }
915 
916 struct intel_bw_state *
917 intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
918 {
919 	struct intel_display *display = to_intel_display(state);
920 	struct intel_global_state *bw_state;
921 
922 	bw_state = intel_atomic_get_old_global_obj_state(state, &display->bw.obj);
923 
924 	return to_intel_bw_state(bw_state);
925 }
926 
927 struct intel_bw_state *
928 intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
929 {
930 	struct intel_display *display = to_intel_display(state);
931 	struct intel_global_state *bw_state;
932 
933 	bw_state = intel_atomic_get_new_global_obj_state(state, &display->bw.obj);
934 
935 	return to_intel_bw_state(bw_state);
936 }
937 
938 struct intel_bw_state *
939 intel_atomic_get_bw_state(struct intel_atomic_state *state)
940 {
941 	struct intel_display *display = to_intel_display(state);
942 	struct intel_global_state *bw_state;
943 
944 	bw_state = intel_atomic_get_global_obj_state(state, &display->bw.obj);
945 	if (IS_ERR(bw_state))
946 		return ERR_CAST(bw_state);
947 
948 	return to_intel_bw_state(bw_state);
949 }
950 
951 static unsigned int icl_max_bw_qgv_point_mask(struct intel_display *display,
952 					      int num_active_planes)
953 {
954 	unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
955 	unsigned int max_bw_point = 0;
956 	unsigned int max_bw = 0;
957 	int i;
958 
959 	for (i = 0; i < num_qgv_points; i++) {
960 		unsigned int max_data_rate =
961 			icl_qgv_bw(display, num_active_planes, i);
962 
963 		/*
964 		 * We need to know which qgv point gives us
965 		 * maximum bandwidth in order to disable SAGV
966 		 * if we find that we exceed SAGV block time
967 		 * with watermarks. By that moment we already
968 		 * have those, as they are calculated earlier in
969 		 * intel_atomic_check.
970 		 */
971 		if (max_data_rate > max_bw) {
972 			max_bw_point = BIT(i);
973 			max_bw = max_data_rate;
974 		}
975 	}
976 
977 	return max_bw_point;
978 }
979 
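/*
 * Convert masks of allowed QGV/PSF points into the mask of points to
 * restrict, limited to the points actually advertised by PCode.
 */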
980 static u16 icl_prepare_qgv_points_mask(struct intel_display *display,
981 				       unsigned int qgv_points,
982 				       unsigned int psf_points)
983 {
984 	return ~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
985 		 ADLS_PCODE_REQ_PSF_PT(psf_points)) & icl_qgv_points_mask(display);
986 }
987 
988 static unsigned int icl_max_bw_psf_gv_point_mask(struct intel_display *display)
989 {
990 	unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
991 	unsigned int max_bw_point_mask = 0;
992 	unsigned int max_bw = 0;
993 	int i;
994 
995 	for (i = 0; i < num_psf_gv_points; i++) {
996 		unsigned int max_data_rate = adl_psf_bw(display, i);
997 
998 		if (max_data_rate > max_bw) {
999 			max_bw_point_mask = BIT(i);
1000 			max_bw = max_data_rate;
1001 		} else if (max_data_rate == max_bw) {
1002 			max_bw_point_mask |= BIT(i);
1003 		}
1004 	}
1005 
1006 	return max_bw_point_mask;
1007 }
1008 
1009 static void icl_force_disable_sagv(struct intel_display *display,
1010 				   struct intel_bw_state *bw_state)
1011 {
1012 	unsigned int qgv_points = icl_max_bw_qgv_point_mask(display, 0);
1013 	unsigned int psf_points = icl_max_bw_psf_gv_point_mask(display);
1014 
1015 	bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display,
1016 								qgv_points,
1017 								psf_points);
1018 
1019 	drm_dbg_kms(display->drm, "Forcing SAGV disable: mask 0x%x\n",
1020 		    bw_state->qgv_points_mask);
1021 
1022 	icl_pcode_restrict_qgv_points(display, bw_state->qgv_points_mask);
1023 }
1024 
1025 void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
1026 {
1027 	struct intel_display *display = to_intel_display(state);
1028 	const struct intel_bw_state *old_bw_state =
1029 		intel_atomic_get_old_bw_state(state);
1030 	const struct intel_bw_state *new_bw_state =
1031 		intel_atomic_get_new_bw_state(state);
1032 	u16 old_mask, new_mask;
1033 
1034 	if (!new_bw_state)
1035 		return;
1036 
1037 	old_mask = old_bw_state->qgv_points_mask;
1038 	new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
1039 
1040 	if (old_mask == new_mask)
1041 		return;
1042 
1043 	WARN_ON(!new_bw_state->base.changed);
1044 
1045 	drm_dbg_kms(display->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
1046 		    old_mask, new_mask);
1047 
1048 	/*
1049 	 * Restrict required qgv points before updating the configuration.
1050 	 * According to BSpec we can't mask and unmask qgv points at the same
1051 	 * time. Also masking should be done before updating the configuration
1052 	 * and unmasking afterwards.
1053 	 */
1054 	icl_pcode_restrict_qgv_points(display, new_mask);
1055 }
1056 
1057 void icl_sagv_post_plane_update(struct intel_atomic_state *state)
1058 {
1059 	struct intel_display *display = to_intel_display(state);
1060 	const struct intel_bw_state *old_bw_state =
1061 		intel_atomic_get_old_bw_state(state);
1062 	const struct intel_bw_state *new_bw_state =
1063 		intel_atomic_get_new_bw_state(state);
1064 	u16 old_mask, new_mask;
1065 
1066 	if (!new_bw_state)
1067 		return;
1068 
1069 	old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
1070 	new_mask = new_bw_state->qgv_points_mask;
1071 
1072 	if (old_mask == new_mask)
1073 		return;
1074 
1075 	WARN_ON(!new_bw_state->base.changed);
1076 
1077 	drm_dbg_kms(display->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
1078 		    old_mask, new_mask);
1079 
1080 	/*
1081 	 * Allow required qgv points after updating the configuration.
1082 	 * According to BSpec we can't mask and unmask qgv points at the same
1083 	 * time. Also masking should be done before updating the configuration
1084 	 * and unmasking afterwards.
1085 	 */
1086 	icl_pcode_restrict_qgv_points(display, new_mask);
1087 }
1088 
1089 static int mtl_find_qgv_points(struct intel_display *display,
1090 			       unsigned int data_rate,
1091 			       unsigned int num_active_planes,
1092 			       struct intel_bw_state *new_bw_state)
1093 {
1094 	unsigned int best_rate = UINT_MAX;
1095 	unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
1096 	unsigned int qgv_peak_bw  = 0;
1097 	int i;
1098 	int ret;
1099 
1100 	ret = intel_atomic_lock_global_state(&new_bw_state->base);
1101 	if (ret)
1102 		return ret;
1103 
1104 	/*
1105 	 * If SAGV cannot be enabled, disable the pcode SAGV by passing all 1's
1106 	 * for qgv peak bw in the PM Demand request. So assign U16_MAX if SAGV is
1107 	 * not enabled. The PM Demand code will clamp the value for the register.
1108 	 */
1109 	if (!intel_bw_can_enable_sagv(display, new_bw_state)) {
1110 		new_bw_state->qgv_point_peakbw = U16_MAX;
1111 		drm_dbg_kms(display->drm, "No SAGV, use UINT_MAX as peak bw.");
1112 		return 0;
1113 	}
1114 
1115 	/*
1116 	 * Find the best QGV point by comparing the data_rate with max data rate
1117 	 * offered per plane group.
1118 	 */
1119 	for (i = 0; i < num_qgv_points; i++) {
1120 		unsigned int bw_index =
1121 			tgl_max_bw_index(display, num_active_planes, i);
1122 		unsigned int max_data_rate;
1123 
1124 		if (bw_index >= ARRAY_SIZE(display->bw.max))
1125 			continue;
1126 
1127 		max_data_rate = display->bw.max[bw_index].deratedbw[i];
1128 
1129 		if (max_data_rate < data_rate)
1130 			continue;
1131 
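		/* Prefer the point with the least excess bandwidth over the requirement */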
1132 		if (max_data_rate - data_rate < best_rate) {
1133 			best_rate = max_data_rate - data_rate;
1134 			qgv_peak_bw = display->bw.max[bw_index].peakbw[i];
1135 		}
1136 
1137 		drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d qgv_peak_bw: %d\n",
1138 			    i, max_data_rate, data_rate, qgv_peak_bw);
1139 	}
1140 
1141 	drm_dbg_kms(display->drm, "Matching peaks QGV bw: %d for required data rate: %d\n",
1142 		    qgv_peak_bw, data_rate);
1143 
1144 	/*
1145 	 * The display configuration cannot be supported if no QGV point
1146 	 * satisfying the required data rate is found.
1147 	 */
1148 	if (qgv_peak_bw == 0) {
1149 		drm_dbg_kms(display->drm, "No QGV points for bw %d for display configuration(%d active planes).\n",
1150 			    data_rate, num_active_planes);
1151 		return -EINVAL;
1152 	}
1153 
1154 	/* MTL PM DEMAND expects QGV BW parameter in multiples of 100 mbps */
1155 	new_bw_state->qgv_point_peakbw = DIV_ROUND_CLOSEST(qgv_peak_bw, 100);
1156 
1157 	return 0;
1158 }
1159 
1160 static int icl_find_qgv_points(struct intel_display *display,
1161 			       unsigned int data_rate,
1162 			       unsigned int num_active_planes,
1163 			       const struct intel_bw_state *old_bw_state,
1164 			       struct intel_bw_state *new_bw_state)
1165 {
1166 	unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
1167 	unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
1168 	u16 psf_points = 0;
1169 	u16 qgv_points = 0;
1170 	int i;
1171 	int ret;
1172 
1173 	ret = intel_atomic_lock_global_state(&new_bw_state->base);
1174 	if (ret)
1175 		return ret;
1176 
1177 	for (i = 0; i < num_qgv_points; i++) {
1178 		unsigned int max_data_rate = icl_qgv_bw(display,
1179 							num_active_planes, i);
1180 		if (max_data_rate >= data_rate)
1181 			qgv_points |= BIT(i);
1182 
1183 		drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d\n",
1184 			    i, max_data_rate, data_rate);
1185 	}
1186 
1187 	for (i = 0; i < num_psf_gv_points; i++) {
1188 		unsigned int max_data_rate = adl_psf_bw(display, i);
1189 
1190 		if (max_data_rate >= data_rate)
1191 			psf_points |= BIT(i);
1192 
1193 		drm_dbg_kms(display->drm, "PSF GV point %d: max bw %d"
1194 			    " required %d\n",
1195 			    i, max_data_rate, data_rate);
1196 	}
1197 
1198 	/*
1199 	 * BSpec states that we should always have at least one allowed point
1200 	 * left, so if we couldn't find any, simply reject the configuration for
1201 	 * obvious reasons.
1202 	 */
1203 	if (qgv_points == 0) {
1204 		drm_dbg_kms(display->drm, "No QGV points provide sufficient memory"
1205 			    " bandwidth %d for display configuration(%d active planes).\n",
1206 			    data_rate, num_active_planes);
1207 		return -EINVAL;
1208 	}
1209 
1210 	if (num_psf_gv_points > 0 && psf_points == 0) {
1211 		drm_dbg_kms(display->drm, "No PSF GV points provide sufficient memory"
1212 			    " bandwidth %d for display configuration(%d active planes).\n",
1213 			    data_rate, num_active_planes);
1214 		return -EINVAL;
1215 	}
1216 
1217 	/*
1218 	 * Leave only a single point with the highest bandwidth if
1219 	 * we can't enable SAGV due to the increased memory latency it may
1220 	 * cause.
1221 	 */
1222 	if (!intel_bw_can_enable_sagv(display, new_bw_state)) {
1223 		qgv_points = icl_max_bw_qgv_point_mask(display, num_active_planes);
1224 		drm_dbg_kms(display->drm, "No SAGV, using single QGV point mask 0x%x\n",
1225 			    qgv_points);
1226 	}
1227 
1228 	/*
1229 	 * We store the ones which need to be masked as that is what PCode
1230 	 * actually accepts as a parameter.
1231 	 */
1232 	new_bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display,
1233 								    qgv_points,
1234 								    psf_points);
1235 	/*
1236 	 * If the actual mask has changed, we need to make sure that
1237 	 * the commits are serialized (in case this is a nomodeset, nonblocking commit).
1238 	 */
1239 	if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) {
1240 		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
1241 		if (ret)
1242 			return ret;
1243 	}
1244 
1245 	return 0;
1246 }
1247 
1248 static int intel_bw_check_qgv_points(struct intel_display *display,
1249 				     const struct intel_bw_state *old_bw_state,
1250 				     struct intel_bw_state *new_bw_state)
1251 {
1252 	unsigned int data_rate = intel_bw_data_rate(display, new_bw_state);
1253 	unsigned int num_active_planes =
1254 			intel_bw_num_active_planes(display, new_bw_state);
1255 
1256 	data_rate = DIV_ROUND_UP(data_rate, 1000);
1257 
1258 	if (DISPLAY_VER(display) >= 14)
1259 		return mtl_find_qgv_points(display, data_rate, num_active_planes,
1260 					   new_bw_state);
1261 	else
1262 		return icl_find_qgv_points(display, data_rate, num_active_planes,
1263 					   old_bw_state, new_bw_state);
1264 }
1265 
1266 static bool intel_dbuf_bw_changed(struct intel_display *display,
1267 				  const struct intel_dbuf_bw *old_dbuf_bw,
1268 				  const struct intel_dbuf_bw *new_dbuf_bw)
1269 {
1270 	enum dbuf_slice slice;
1271 
1272 	for_each_dbuf_slice(display, slice) {
1273 		if (old_dbuf_bw->max_bw[slice] != new_dbuf_bw->max_bw[slice] ||
1274 		    old_dbuf_bw->active_planes[slice] != new_dbuf_bw->active_planes[slice])
1275 			return true;
1276 	}
1277 
1278 	return false;
1279 }
1280 
1281 static bool intel_bw_state_changed(struct intel_display *display,
1282 				   const struct intel_bw_state *old_bw_state,
1283 				   const struct intel_bw_state *new_bw_state)
1284 {
1285 	enum pipe pipe;
1286 
1287 	for_each_pipe(display, pipe) {
1288 		const struct intel_dbuf_bw *old_dbuf_bw =
1289 			&old_bw_state->dbuf_bw[pipe];
1290 		const struct intel_dbuf_bw *new_dbuf_bw =
1291 			&new_bw_state->dbuf_bw[pipe];
1292 
1293 		if (intel_dbuf_bw_changed(display, old_dbuf_bw, new_dbuf_bw))
1294 			return true;
1295 
1296 		if (intel_bw_crtc_min_cdclk(display, old_bw_state->data_rate[pipe]) !=
1297 		    intel_bw_crtc_min_cdclk(display, new_bw_state->data_rate[pipe]))
1298 			return true;
1299 	}
1300 
1301 	return false;
1302 }
1303 
1304 static void skl_plane_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
1305 				   struct intel_crtc *crtc,
1306 				   enum plane_id plane_id,
1307 				   const struct skl_ddb_entry *ddb,
1308 				   unsigned int data_rate)
1309 {
1310 	struct intel_display *display = to_intel_display(crtc);
1311 	unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(display, ddb);
1312 	enum dbuf_slice slice;
1313 
1314 	/*
1315 	 * The arbiter can only really guarantee an
1316 	 * equal share of the total bw to each plane.
1317 	 */
1318 	for_each_dbuf_slice_in_mask(display, slice, dbuf_mask) {
1319 		dbuf_bw->max_bw[slice] = max(dbuf_bw->max_bw[slice], data_rate);
1320 		dbuf_bw->active_planes[slice] |= BIT(plane_id);
1321 	}
1322 }
1323 
1324 static void skl_crtc_calc_dbuf_bw(struct intel_dbuf_bw *dbuf_bw,
1325 				  const struct intel_crtc_state *crtc_state)
1326 {
1327 	struct intel_display *display = to_intel_display(crtc_state);
1328 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1329 	enum plane_id plane_id;
1330 
1331 	memset(dbuf_bw, 0, sizeof(*dbuf_bw));
1332 
1333 	if (!crtc_state->hw.active)
1334 		return;
1335 
1336 	for_each_plane_id_on_crtc(crtc, plane_id) {
1337 		/*
1338 		 * We assume cursors are small enough
1339 		 * to not cause bandwidth problems.
1340 		 */
1341 		if (plane_id == PLANE_CURSOR)
1342 			continue;
1343 
1344 		skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
1345 				       &crtc_state->wm.skl.plane_ddb[plane_id],
1346 				       crtc_state->data_rate[plane_id]);
1347 
1348 		if (DISPLAY_VER(display) < 11)
1349 			skl_plane_calc_dbuf_bw(dbuf_bw, crtc, plane_id,
1350 					       &crtc_state->wm.skl.plane_ddb_y[plane_id],
1351 					       crtc_state->data_rate[plane_id]);
1352 	}
1353 }
1354 
1355 /* "Maximum Data Buffer Bandwidth" */
1356 static int
1357 intel_bw_dbuf_min_cdclk(struct intel_display *display,
1358 			const struct intel_bw_state *bw_state)
1359 {
1360 	unsigned int total_max_bw = 0;
1361 	enum dbuf_slice slice;
1362 
1363 	for_each_dbuf_slice(display, slice) {
1364 		int num_active_planes = 0;
1365 		unsigned int max_bw = 0;
1366 		enum pipe pipe;
1367 
1368 		/*
1369 		 * The arbiter can only really guarantee an
1370 		 * equal share of the total bw to each plane.
1371 		 */
1372 		for_each_pipe(display, pipe) {
1373 			const struct intel_dbuf_bw *dbuf_bw = &bw_state->dbuf_bw[pipe];
1374 
1375 			max_bw = max(dbuf_bw->max_bw[slice], max_bw);
1376 			num_active_planes += hweight8(dbuf_bw->active_planes[slice]);
1377 		}
1378 		max_bw *= num_active_planes;
1379 
1380 		total_max_bw = max(total_max_bw, max_bw);
1381 	}
1382 
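	/* Assuming the DBUF can transfer up to 64 bytes per CDCLK cycle */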
1383 	return DIV_ROUND_UP(total_max_bw, 64);
1384 }
1385 
1386 int intel_bw_min_cdclk(struct intel_display *display,
1387 		       const struct intel_bw_state *bw_state)
1388 {
1389 	enum pipe pipe;
1390 	int min_cdclk;
1391 
1392 	min_cdclk = intel_bw_dbuf_min_cdclk(display, bw_state);
1393 
1394 	for_each_pipe(display, pipe)
1395 		min_cdclk = max(min_cdclk,
1396 				intel_bw_crtc_min_cdclk(display,
1397 							bw_state->data_rate[pipe]));
1398 
1399 	return min_cdclk;
1400 }
1401 
1402 int intel_bw_calc_min_cdclk(struct intel_atomic_state *state,
1403 			    bool *need_cdclk_calc)
1404 {
1405 	struct intel_display *display = to_intel_display(state);
1406 	struct intel_bw_state *new_bw_state = NULL;
1407 	const struct intel_bw_state *old_bw_state = NULL;
1408 	const struct intel_cdclk_state *cdclk_state;
1409 	const struct intel_crtc_state *old_crtc_state;
1410 	const struct intel_crtc_state *new_crtc_state;
1411 	int old_min_cdclk, new_min_cdclk;
1412 	struct intel_crtc *crtc;
1413 	int i;
1414 
1415 	if (DISPLAY_VER(display) < 9)
1416 		return 0;
1417 
1418 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
1419 					    new_crtc_state, i) {
1420 		struct intel_dbuf_bw old_dbuf_bw, new_dbuf_bw;
1421 
1422 		skl_crtc_calc_dbuf_bw(&old_dbuf_bw, old_crtc_state);
1423 		skl_crtc_calc_dbuf_bw(&new_dbuf_bw, new_crtc_state);
1424 
1425 		if (!intel_dbuf_bw_changed(display, &old_dbuf_bw, &new_dbuf_bw))
1426 			continue;
1427 
1428 		new_bw_state = intel_atomic_get_bw_state(state);
1429 		if (IS_ERR(new_bw_state))
1430 			return PTR_ERR(new_bw_state);
1431 
1432 		old_bw_state = intel_atomic_get_old_bw_state(state);
1433 
1434 		new_bw_state->dbuf_bw[crtc->pipe] = new_dbuf_bw;
1435 	}
1436 
1437 	if (!old_bw_state)
1438 		return 0;
1439 
1440 	if (intel_bw_state_changed(display, old_bw_state, new_bw_state)) {
1441 		int ret = intel_atomic_lock_global_state(&new_bw_state->base);
1442 		if (ret)
1443 			return ret;
1444 	}
1445 
1446 	old_min_cdclk = intel_bw_min_cdclk(display, old_bw_state);
1447 	new_min_cdclk = intel_bw_min_cdclk(display, new_bw_state);
1448 
1449 	/*
1450 	 * No need to check against the cdclk state if
1451 	 * the min cdclk doesn't increase.
1452 	 *
1453 	 * I.e. we only ever increase the cdclk due to bandwidth
1454 	 * requirements. This can reduce back and forth
1455 	 * display blinking due to constant cdclk changes.
1456 	 */
1457 	if (new_min_cdclk <= old_min_cdclk)
1458 		return 0;
1459 
1460 	cdclk_state = intel_atomic_get_cdclk_state(state);
1461 	if (IS_ERR(cdclk_state))
1462 		return PTR_ERR(cdclk_state);
1463 
1464 	/*
1465 	 * No need to recalculate the cdclk state if
1466 	 * the min cdclk doesn't increase.
1467 	 *
1468 	 * I.e. we only ever increase the cdclk due to bandwidth
1469 	 * requirements. This can reduce back and forth
1470 	 * display blinking due to constant cdclk changes.
1471 	 */
1472 	if (new_min_cdclk <= intel_cdclk_bw_min_cdclk(cdclk_state))
1473 		return 0;
1474 
1475 	drm_dbg_kms(display->drm,
1476 		    "new bandwidth min cdclk (%d kHz) > old min cdclk (%d kHz)\n",
1477 		    new_min_cdclk, intel_cdclk_bw_min_cdclk(cdclk_state));
1478 	*need_cdclk_calc = true;
1479 
1480 	return 0;
1481 }
1482 
1483 static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed)
1484 {
1485 	struct intel_display *display = to_intel_display(state);
1486 	const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
1487 	struct intel_crtc *crtc;
1488 	int i;
1489 
1490 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
1491 					    new_crtc_state, i) {
1492 		unsigned int old_data_rate =
1493 			intel_bw_crtc_data_rate(old_crtc_state);
1494 		unsigned int new_data_rate =
1495 			intel_bw_crtc_data_rate(new_crtc_state);
1496 		unsigned int old_active_planes =
1497 			intel_bw_crtc_num_active_planes(old_crtc_state);
1498 		unsigned int new_active_planes =
1499 			intel_bw_crtc_num_active_planes(new_crtc_state);
1500 		struct intel_bw_state *new_bw_state;
1501 
1502 		/*
1503 		 * Avoid locking the bw state when
1504 		 * nothing significant has changed.
1505 		 */
1506 		if (old_data_rate == new_data_rate &&
1507 		    old_active_planes == new_active_planes)
1508 			continue;
1509 
1510 		new_bw_state = intel_atomic_get_bw_state(state);
1511 		if (IS_ERR(new_bw_state))
1512 			return PTR_ERR(new_bw_state);
1513 
1514 		new_bw_state->data_rate[crtc->pipe] = new_data_rate;
1515 		new_bw_state->num_active_planes[crtc->pipe] = new_active_planes;
1516 
1517 		*changed = true;
1518 
1519 		drm_dbg_kms(display->drm,
1520 			    "[CRTC:%d:%s] data rate %u num active planes %u\n",
1521 			    crtc->base.base.id, crtc->base.name,
1522 			    new_bw_state->data_rate[crtc->pipe],
1523 			    new_bw_state->num_active_planes[crtc->pipe]);
1524 	}
1525 
1526 	return 0;
1527 }
1528 
1529 static int intel_bw_modeset_checks(struct intel_atomic_state *state)
1530 {
1531 	struct intel_display *display = to_intel_display(state);
1532 	const struct intel_bw_state *old_bw_state;
1533 	struct intel_bw_state *new_bw_state;
1534 
1535 	if (DISPLAY_VER(display) < 9)
1536 		return 0;
1537 
1538 	new_bw_state = intel_atomic_get_bw_state(state);
1539 	if (IS_ERR(new_bw_state))
1540 		return PTR_ERR(new_bw_state);
1541 
1542 	old_bw_state = intel_atomic_get_old_bw_state(state);
1543 
1544 	new_bw_state->active_pipes =
1545 		intel_calc_active_pipes(state, old_bw_state->active_pipes);
1546 
1547 	if (new_bw_state->active_pipes != old_bw_state->active_pipes) {
1548 		int ret;
1549 
1550 		ret = intel_atomic_lock_global_state(&new_bw_state->base);
1551 		if (ret)
1552 			return ret;
1553 	}
1554 
1555 	return 0;
1556 }
1557 
1558 static int intel_bw_check_sagv_mask(struct intel_atomic_state *state)
1559 {
1560 	struct intel_display *display = to_intel_display(state);
1561 	const struct intel_crtc_state *old_crtc_state;
1562 	const struct intel_crtc_state *new_crtc_state;
1563 	const struct intel_bw_state *old_bw_state = NULL;
1564 	struct intel_bw_state *new_bw_state = NULL;
1565 	struct intel_crtc *crtc;
1566 	int ret, i;
1567 
1568 	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
1569 					    new_crtc_state, i) {
1570 		if (intel_crtc_can_enable_sagv(old_crtc_state) ==
1571 		    intel_crtc_can_enable_sagv(new_crtc_state))
1572 			continue;
1573 
1574 		new_bw_state = intel_atomic_get_bw_state(state);
1575 		if (IS_ERR(new_bw_state))
1576 			return PTR_ERR(new_bw_state);
1577 
1578 		old_bw_state = intel_atomic_get_old_bw_state(state);
1579 
1580 		if (intel_crtc_can_enable_sagv(new_crtc_state))
1581 			new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
1582 		else
1583 			new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
1584 	}
1585 
1586 	if (!new_bw_state)
1587 		return 0;
1588 
1589 	if (intel_bw_can_enable_sagv(display, new_bw_state) !=
1590 	    intel_bw_can_enable_sagv(display, old_bw_state)) {
1591 		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
1592 		if (ret)
1593 			return ret;
1594 	} else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
1595 		ret = intel_atomic_lock_global_state(&new_bw_state->base);
1596 		if (ret)
1597 			return ret;
1598 	}
1599 
1600 	return 0;
1601 }
1602 
1603 int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms)
1604 {
1605 	struct intel_display *display = to_intel_display(state);
1606 	bool changed = false;
1607 	struct intel_bw_state *new_bw_state;
1608 	const struct intel_bw_state *old_bw_state;
1609 	int ret;
1610 
1611 	if (DISPLAY_VER(display) < 9)
1612 		return 0;
1613 
1614 	if (any_ms) {
1615 		ret = intel_bw_modeset_checks(state);
1616 		if (ret)
1617 			return ret;
1618 	}
1619 
1620 	ret = intel_bw_check_sagv_mask(state);
1621 	if (ret)
1622 		return ret;
1623 
1624 	/* FIXME earlier gens need some checks too */
1625 	if (DISPLAY_VER(display) < 11)
1626 		return 0;
1627 
1628 	ret = intel_bw_check_data_rate(state, &changed);
1629 	if (ret)
1630 		return ret;
1631 
1632 	old_bw_state = intel_atomic_get_old_bw_state(state);
1633 	new_bw_state = intel_atomic_get_new_bw_state(state);
1634 
1635 	if (new_bw_state &&
1636 	    intel_bw_can_enable_sagv(display, old_bw_state) !=
1637 	    intel_bw_can_enable_sagv(display, new_bw_state))
1638 		changed = true;
1639 
1640 	/*
1641 	 * If none of our inputs (data rates, number of active
1642 	 * planes, SAGV yes/no) changed then nothing to do here.
1643 	 */
1644 	if (!changed)
1645 		return 0;
1646 
1647 	ret = intel_bw_check_qgv_points(display, old_bw_state, new_bw_state);
1648 	if (ret)
1649 		return ret;
1650 
1651 	return 0;
1652 }
1653 
1654 static void intel_bw_crtc_update(struct intel_bw_state *bw_state,
1655 				 const struct intel_crtc_state *crtc_state)
1656 {
1657 	struct intel_display *display = to_intel_display(crtc_state);
1658 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1659 
1660 	bw_state->data_rate[crtc->pipe] =
1661 		intel_bw_crtc_data_rate(crtc_state);
1662 	bw_state->num_active_planes[crtc->pipe] =
1663 		intel_bw_crtc_num_active_planes(crtc_state);
1664 
1665 	drm_dbg_kms(display->drm, "pipe %c data rate %u num active planes %u\n",
1666 		    pipe_name(crtc->pipe),
1667 		    bw_state->data_rate[crtc->pipe],
1668 		    bw_state->num_active_planes[crtc->pipe]);
1669 }
1670 
1671 void intel_bw_update_hw_state(struct intel_display *display)
1672 {
1673 	struct intel_bw_state *bw_state =
1674 		to_intel_bw_state(display->bw.obj.state);
1675 	struct intel_crtc *crtc;
1676 
1677 	if (DISPLAY_VER(display) < 9)
1678 		return;
1679 
1680 	bw_state->active_pipes = 0;
1681 	bw_state->pipe_sagv_reject = 0;
1682 
1683 	for_each_intel_crtc(display->drm, crtc) {
1684 		const struct intel_crtc_state *crtc_state =
1685 			to_intel_crtc_state(crtc->base.state);
1686 		enum pipe pipe = crtc->pipe;
1687 
1688 		if (crtc_state->hw.active)
1689 			bw_state->active_pipes |= BIT(pipe);
1690 
1691 		if (DISPLAY_VER(display) >= 11)
1692 			intel_bw_crtc_update(bw_state, crtc_state);
1693 
1694 		skl_crtc_calc_dbuf_bw(&bw_state->dbuf_bw[pipe], crtc_state);
1695 
1696 		/* initially SAGV has been forced off */
1697 		bw_state->pipe_sagv_reject |= BIT(pipe);
1698 	}
1699 }
1700 
1701 void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc)
1702 {
1703 	struct intel_display *display = to_intel_display(crtc);
1704 	struct intel_bw_state *bw_state =
1705 		to_intel_bw_state(display->bw.obj.state);
1706 	enum pipe pipe = crtc->pipe;
1707 
1708 	if (DISPLAY_VER(display) < 9)
1709 		return;
1710 
1711 	bw_state->data_rate[pipe] = 0;
1712 	bw_state->num_active_planes[pipe] = 0;
1713 	memset(&bw_state->dbuf_bw[pipe], 0, sizeof(bw_state->dbuf_bw[pipe]));
1714 }
1715 
1716 static struct intel_global_state *
1717 intel_bw_duplicate_state(struct intel_global_obj *obj)
1718 {
1719 	struct intel_bw_state *state;
1720 
1721 	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
1722 	if (!state)
1723 		return NULL;
1724 
1725 	return &state->base;
1726 }
1727 
1728 static void intel_bw_destroy_state(struct intel_global_obj *obj,
1729 				   struct intel_global_state *state)
1730 {
1731 	kfree(state);
1732 }
1733 
1734 static const struct intel_global_state_funcs intel_bw_funcs = {
1735 	.atomic_duplicate_state = intel_bw_duplicate_state,
1736 	.atomic_destroy_state = intel_bw_destroy_state,
1737 };
1738 
1739 int intel_bw_init(struct intel_display *display)
1740 {
1741 	struct intel_bw_state *state;
1742 
1743 	state = kzalloc(sizeof(*state), GFP_KERNEL);
1744 	if (!state)
1745 		return -ENOMEM;
1746 
1747 	intel_atomic_global_obj_init(display, &display->bw.obj,
1748 				     &state->base, &intel_bw_funcs);
1749 
1750 	/*
1751 	 * Limit this only if we have SAGV. From display version 14 onwards,
1752 	 * SAGV is handled through pmdemand requests.
1753 	 */
1754 	if (intel_has_sagv(display) && IS_DISPLAY_VER(display, 11, 13))
1755 		icl_force_disable_sagv(display, state);
1756 
1757 	return 0;
1758 }
1759 
1760 bool intel_bw_pmdemand_needs_update(struct intel_atomic_state *state)
1761 {
1762 	const struct intel_bw_state *new_bw_state, *old_bw_state;
1763 
1764 	new_bw_state = intel_atomic_get_new_bw_state(state);
1765 	old_bw_state = intel_atomic_get_old_bw_state(state);
1766 
1767 	if (new_bw_state &&
1768 	    new_bw_state->qgv_point_peakbw != old_bw_state->qgv_point_peakbw)
1769 		return true;
1770 
1771 	return false;
1772 }
1773 
1774 bool intel_bw_can_enable_sagv(struct intel_display *display,
1775 			      const struct intel_bw_state *bw_state)
1776 {
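	/* Pre-ICL hardware can use SAGV only with at most one active pipe */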
1777 	if (DISPLAY_VER(display) < 11 &&
1778 	    bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
1779 		return false;
1780 
1781 	return bw_state->pipe_sagv_reject == 0;
1782 }
1783 
1784 int intel_bw_qgv_point_peakbw(const struct intel_bw_state *bw_state)
1785 {
1786 	return bw_state->qgv_point_peakbw;
1787 }
1788