xref: /linux/drivers/gpu/drm/i915/display/intel_bw.c (revision 815e260a18a3af4dab59025ee99a7156c0e8b5e0)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_print.h>

#include "soc/intel_dram.h"

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_bw.h"
#include "intel_crtc.h"
#include "intel_display_core.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
#include "intel_mchbar_regs.h"
#include "intel_pcode.h"
#include "intel_uncore.h"
#include "skl_watermark.h"

struct intel_bw_state {
	struct intel_global_state base;

	/*
	 * Contains a bit mask used to determine whether the
	 * corresponding pipe allows SAGV or not.
	 */
	u8 pipe_sagv_reject;

	/* bitmask of active pipes */
	u8 active_pipes;

	/*
	 * From MTL onwards, to lock a QGV point, punit expects the peak BW of
	 * the selected QGV point as the parameter in multiples of 100MB/s
	 */
	u16 qgv_point_peakbw;

	/*
	 * Current QGV points mask, which restricts
	 * some particular SAGV states; not to be confused
	 * with pipe_sagv_reject.
	 */
	u16 qgv_points_mask;

	unsigned int data_rate[I915_MAX_PIPES];
	u8 num_active_planes[I915_MAX_PIPES];
};

/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
	u16 dclk, t_rp, t_rdpre, t_rc, t_ras, t_rcd;
};

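/* cap on the derated bandwidth, as a percentage of the raw peak bandwidth */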
#define DEPROGBWPCLIMIT		60

struct intel_psf_gv_point {
	u8 clk; /* clock in multiples of 16.6666 MHz */
};

struct intel_qgv_info {
	struct intel_qgv_point points[I915_NUM_QGV_POINTS];
	struct intel_psf_gv_point psf_points[I915_NUM_PSF_GV_POINTS];
	u8 num_points;
	u8 num_psf_points;
	u8 t_bl;
	u8 max_numchannels;
	u8 channel_width;
	u8 deinterleave;
};

static int dg1_mchbar_read_qgv_point_info(struct intel_display *display,
					  struct intel_qgv_point *sp,
					  int point)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	u32 dclk_ratio, dclk_reference;
	u32 val;

	val = intel_uncore_read(&i915->uncore, SA_PERF_STATUS_0_0_0_MCHBAR_PC);
	dclk_ratio = REG_FIELD_GET(DG1_QCLK_RATIO_MASK, val);
	if (val & DG1_QCLK_REFERENCE)
		dclk_reference = 6; /* 6 * 16.666 MHz = 100 MHz */
	else
		dclk_reference = 8; /* 8 * 16.666 MHz = 133 MHz */
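	/* dclk is reported in 16.666 MHz units; convert to MHz, rounded */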
	sp->dclk = DIV_ROUND_UP((16667 * dclk_ratio * dclk_reference) + 500, 1000);

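	/* in gear2 mode the effective dclk is presumably twice the reported value */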
	val = intel_uncore_read(&i915->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
	if (val & DG1_GEAR_TYPE)
		sp->dclk *= 2;

	if (sp->dclk == 0)
		return -EINVAL;

	val = intel_uncore_read(&i915->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR);
	sp->t_rp = REG_FIELD_GET(DG1_DRAM_T_RP_MASK, val);
	sp->t_rdpre = REG_FIELD_GET(DG1_DRAM_T_RDPRE_MASK, val);

	val = intel_uncore_read(&i915->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH);
	sp->t_rcd = REG_FIELD_GET(DG1_DRAM_T_RCD_MASK, val);
	sp->t_ras = REG_FIELD_GET(DG1_DRAM_T_RAS_MASK, val);

	sp->t_rc = sp->t_rp + sp->t_ras;

	return 0;
}

static int icl_pcode_read_qgv_point_info(struct intel_display *display,
					 struct intel_qgv_point *sp,
					 int point)
{
	u32 val = 0, val2 = 0;
	u16 dclk;
	int ret;

	ret = intel_pcode_read(display->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
			       ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
			       &val, &val2);
	if (ret)
		return ret;

	dclk = val & 0xffff;
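	/* dclk is in 16.666 MHz units; display 12+ adds 0.5 MHz to round to nearest */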
	sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(display) >= 12 ? 500 : 0),
				1000);
	sp->t_rp = (val & 0xff0000) >> 16;
	sp->t_rcd = (val & 0xff000000) >> 24;

	sp->t_rdpre = val2 & 0xff;
	sp->t_ras = (val2 & 0xff00) >> 8;

	sp->t_rc = sp->t_rp + sp->t_ras;

	return 0;
}

static int adls_pcode_read_psf_gv_point_info(struct intel_display *display,
					     struct intel_psf_gv_point *points)
{
	u32 val = 0;
	int ret;
	int i;

	ret = intel_pcode_read(display->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
			       ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
	if (ret)
		return ret;

	for (i = 0; i < I915_NUM_PSF_GV_POINTS; i++) {
		points[i].clk = val & 0xff;
		val >>= 8;
	}

	return 0;
}

static u16 icl_qgv_points_mask(struct intel_display *display)
{
	unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
	unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
	u16 qgv_points = 0, psf_points = 0;

	/*
	 * We can _not_ use the whole ADLS_QGV_PT_MASK here, as PCode rejects
	 * the request if we try to mask any unadvertised points.
	 * So we need to operate only with those returned from PCode.
	 */
	if (num_qgv_points > 0)
		qgv_points = GENMASK(num_qgv_points - 1, 0);

	if (num_psf_gv_points > 0)
		psf_points = GENMASK(num_psf_gv_points - 1, 0);

	return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points);
}

static bool is_sagv_enabled(struct intel_display *display, u16 points_mask)
{
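	/* SAGV is effectively disabled once all but one advertised QGV point are masked */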
	return !is_power_of_2(~points_mask & icl_qgv_points_mask(display) &
			      ICL_PCODE_REQ_QGV_PT_MASK);
}

static int icl_pcode_restrict_qgv_points(struct intel_display *display,
					 u32 points_mask)
{
	int ret;

	if (DISPLAY_VER(display) >= 14)
		return 0;

	/* bspec says to keep retrying for at least 1 ms */
	ret = intel_pcode_request(display->drm, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
				  points_mask,
				  ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
				  ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
				  1);

	if (ret < 0) {
		drm_err(display->drm,
			"Failed to disable qgv points (0x%x) points: 0x%x\n",
			ret, points_mask);
		return ret;
	}

	display->sagv.status = is_sagv_enabled(display, points_mask) ?
		I915_SAGV_ENABLED : I915_SAGV_DISABLED;

	return 0;
}

static int mtl_read_qgv_point_info(struct intel_display *display,
				   struct intel_qgv_point *sp, int point)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	u32 val, val2;
	u16 dclk;

	val = intel_uncore_read(&i915->uncore,
				MTL_MEM_SS_INFO_QGV_POINT_LOW(point));
	val2 = intel_uncore_read(&i915->uncore,
				 MTL_MEM_SS_INFO_QGV_POINT_HIGH(point));
	dclk = REG_FIELD_GET(MTL_DCLK_MASK, val);
	sp->dclk = DIV_ROUND_CLOSEST(16667 * dclk, 1000);
	sp->t_rp = REG_FIELD_GET(MTL_TRP_MASK, val);
	sp->t_rcd = REG_FIELD_GET(MTL_TRCD_MASK, val);

	sp->t_rdpre = REG_FIELD_GET(MTL_TRDPRE_MASK, val2);
	sp->t_ras = REG_FIELD_GET(MTL_TRAS_MASK, val2);

	sp->t_rc = sp->t_rp + sp->t_ras;

	return 0;
}

static int
intel_read_qgv_point_info(struct intel_display *display,
			  struct intel_qgv_point *sp,
			  int point)
{
	if (DISPLAY_VER(display) >= 14)
		return mtl_read_qgv_point_info(display, sp, point);
	else if (display->platform.dg1)
		return dg1_mchbar_read_qgv_point_info(display, sp, point);
	else
		return icl_pcode_read_qgv_point_info(display, sp, point);
}

static int icl_get_qgv_points(struct intel_display *display,
			      const struct dram_info *dram_info,
			      struct intel_qgv_info *qi,
			      bool is_y_tile)
{
	int i, ret;

	qi->num_points = dram_info->num_qgv_points;
	qi->num_psf_points = dram_info->num_psf_gv_points;

	if (DISPLAY_VER(display) >= 14) {
		switch (dram_info->type) {
		case INTEL_DRAM_DDR4:
			qi->t_bl = 4;
			qi->max_numchannels = 2;
			qi->channel_width = 64;
			qi->deinterleave = 2;
			break;
		case INTEL_DRAM_DDR5:
			qi->t_bl = 8;
			qi->max_numchannels = 4;
			qi->channel_width = 32;
			qi->deinterleave = 2;
			break;
		case INTEL_DRAM_LPDDR4:
		case INTEL_DRAM_LPDDR5:
			qi->t_bl = 16;
			qi->max_numchannels = 8;
			qi->channel_width = 16;
			qi->deinterleave = 4;
			break;
		case INTEL_DRAM_GDDR:
		case INTEL_DRAM_GDDR_ECC:
			qi->channel_width = 32;
			break;
		default:
			MISSING_CASE(dram_info->type);
			return -EINVAL;
		}
	} else if (DISPLAY_VER(display) >= 12) {
		switch (dram_info->type) {
		case INTEL_DRAM_DDR4:
			qi->t_bl = is_y_tile ? 8 : 4;
			qi->max_numchannels = 2;
			qi->channel_width = 64;
			qi->deinterleave = is_y_tile ? 1 : 2;
			break;
		case INTEL_DRAM_DDR5:
			qi->t_bl = is_y_tile ? 16 : 8;
			qi->max_numchannels = 4;
			qi->channel_width = 32;
			qi->deinterleave = is_y_tile ? 1 : 2;
			break;
		case INTEL_DRAM_LPDDR4:
			if (display->platform.rocketlake) {
				qi->t_bl = 8;
				qi->max_numchannels = 4;
				qi->channel_width = 32;
				qi->deinterleave = 2;
				break;
			}
			fallthrough;
		case INTEL_DRAM_LPDDR5:
			qi->t_bl = 16;
			qi->max_numchannels = 8;
			qi->channel_width = 16;
			qi->deinterleave = is_y_tile ? 2 : 4;
			break;
		default:
			qi->t_bl = 16;
			qi->max_numchannels = 1;
			break;
		}
	} else if (DISPLAY_VER(display) == 11) {
		qi->t_bl = dram_info->type == INTEL_DRAM_DDR4 ? 4 : 8;
		qi->max_numchannels = 1;
	}

	if (drm_WARN_ON(display->drm,
			qi->num_points > ARRAY_SIZE(qi->points)))
		qi->num_points = ARRAY_SIZE(qi->points);

	for (i = 0; i < qi->num_points; i++) {
		struct intel_qgv_point *sp = &qi->points[i];

		ret = intel_read_qgv_point_info(display, sp, i);
		if (ret) {
			drm_dbg_kms(display->drm, "Could not read QGV %d info\n", i);
			return ret;
		}

		drm_dbg_kms(display->drm,
			    "QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
			    i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
			    sp->t_rcd, sp->t_rc);
	}

	if (qi->num_psf_points > 0) {
		ret = adls_pcode_read_psf_gv_point_info(display, qi->psf_points);
		if (ret) {
			drm_err(display->drm, "Failed to read PSF point data; PSF points will not be considered in bandwidth calculations.\n");
			qi->num_psf_points = 0;
		}

		for (i = 0; i < qi->num_psf_points; i++)
			drm_dbg_kms(display->drm,
				    "PSF GV %d: CLK=%d\n",
				    i, qi->psf_points[i].clk);
	}

	return 0;
}

static int adl_calc_psf_bw(int clk)
{
	/*
	 * clk is in multiples of 16.666 MHz (100/6)
	 * According to BSpec the PSF GV bandwidth is
	 * calculated as BW = 64 * clk * 16.666 MHz
	 */
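	/* e.g. clk = 62 (~1033 MHz): 64 * 62 * 100 / 6 = 66133 MB/s */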
	return DIV_ROUND_CLOSEST(64 * clk * 100, 6);
}

static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
{
	u16 dclk = 0;
	int i;

	for (i = 0; i < qi->num_points; i++)
		dclk = max(dclk, qi->points[i].dclk);

	return dclk;
}

struct intel_sa_info {
	u16 displayrtids;
	u8 deburst, deprogbwlimit, derating;
};

static const struct intel_sa_info icl_sa_info = {
	.deburst = 8,
	.deprogbwlimit = 25, /* GB/s */
	.displayrtids = 128,
	.derating = 10,
};

static const struct intel_sa_info tgl_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 34, /* GB/s */
	.displayrtids = 256,
	.derating = 10,
};

static const struct intel_sa_info rkl_sa_info = {
	.deburst = 8,
	.deprogbwlimit = 20, /* GB/s */
	.displayrtids = 128,
	.derating = 10,
};

static const struct intel_sa_info adls_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 38, /* GB/s */
	.displayrtids = 256,
	.derating = 10,
};

static const struct intel_sa_info adlp_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 38, /* GB/s */
	.displayrtids = 256,
	.derating = 20,
};

static const struct intel_sa_info mtl_sa_info = {
	.deburst = 32,
	.deprogbwlimit = 38, /* GB/s */
	.displayrtids = 256,
	.derating = 10,
};

static const struct intel_sa_info xe2_hpd_sa_info = {
	.derating = 30,
	.deprogbwlimit = 53,
	/* Other values not used by simplified algorithm */
};

static const struct intel_sa_info xe2_hpd_ecc_sa_info = {
	.derating = 45,
	.deprogbwlimit = 53,
	/* Other values not used by simplified algorithm */
};

static const struct intel_sa_info xe3lpd_sa_info = {
	.deburst = 32,
	.deprogbwlimit = 65, /* GB/s */
	.displayrtids = 256,
	.derating = 10,
};

static const struct intel_sa_info xe3lpd_3002_sa_info = {
	.deburst = 32,
	.deprogbwlimit = 22, /* GB/s */
	.displayrtids = 256,
	.derating = 10,
};

static int icl_get_bw_info(struct intel_display *display,
			   const struct dram_info *dram_info,
			   const struct intel_sa_info *sa)
{
	struct intel_qgv_info qi = {};
	bool is_y_tile = true; /* assume y tile may be used */
	int num_channels = max_t(u8, 1, dram_info->num_channels);
	int ipqdepth, ipqdepthpch = 16;
	int dclk_max;
	int maxdebw;
	int num_groups = ARRAY_SIZE(display->bw.max);
	int i, ret;

	ret = icl_get_qgv_points(display, dram_info, &qi, is_y_tile);
	if (ret) {
		drm_dbg_kms(display->drm,
			    "Failed to get memory subsystem information, ignoring bandwidth limits");
		return ret;
	}

	dclk_max = icl_sagv_max_dclk(&qi);
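	/* cap to 60% of the raw peak, assuming 16 bytes transferred per dclk */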
	maxdebw = min(sa->deprogbwlimit * 1000, dclk_max * 16 * 6 / 10);
	ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);
	qi.deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);

	for (i = 0; i < num_groups; i++) {
		struct intel_bw_info *bi = &display->bw.max[i];
		int clpchgroup;
		int j;

		clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;
		bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;

		bi->num_qgv_points = qi.num_points;
		bi->num_psf_gv_points = qi.num_psf_points;

		for (j = 0; j < qi.num_points; j++) {
			const struct intel_qgv_point *sp = &qi.points[j];
			int ct, bw;

			/*
			 * Max row cycle time
			 *
			 * FIXME what is the logic behind the
			 * assumed burst length?
			 */
			ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
				   (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
			bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct);

			bi->deratedbw[j] = min(maxdebw,
					       bw * (100 - sa->derating) / 100);

			drm_dbg_kms(display->drm,
				    "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
				    i, j, bi->num_planes, bi->deratedbw[j]);
		}
	}
	/*
	 * If SAGV is disabled in BIOS, we always get 1
	 * SAGV point, but we can't send PCode commands to restrict it
	 * as they would fail and be pointless anyway.
	 */
	if (qi.num_points == 1)
		display->sagv.status = I915_SAGV_NOT_CONTROLLED;
	else
		display->sagv.status = I915_SAGV_ENABLED;

	return 0;
}

static int tgl_get_bw_info(struct intel_display *display,
			   const struct dram_info *dram_info,
			   const struct intel_sa_info *sa)
{
	struct intel_qgv_info qi = {};
	bool is_y_tile = true; /* assume y tile may be used */
	int num_channels = max_t(u8, 1, dram_info->num_channels);
	int ipqdepth, ipqdepthpch = 16;
	int dclk_max;
	int maxdebw, peakbw;
	int clperchgroup;
	int num_groups = ARRAY_SIZE(display->bw.max);
	int i, ret;

	ret = icl_get_qgv_points(display, dram_info, &qi, is_y_tile);
	if (ret) {
		drm_dbg_kms(display->drm,
			    "Failed to get memory subsystem information, ignoring bandwidth limits");
		return ret;
	}

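	/* each pre-MTL LPDDR4/5 channel presumably counts as two half-width channels */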
	if (DISPLAY_VER(display) < 14 &&
	    (dram_info->type == INTEL_DRAM_LPDDR4 || dram_info->type == INTEL_DRAM_LPDDR5))
		num_channels *= 2;

	qi.deinterleave = qi.deinterleave ? : DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);

	if (num_channels < qi.max_numchannels && DISPLAY_VER(display) >= 12)
		qi.deinterleave = max(DIV_ROUND_UP(qi.deinterleave, 2), 1);

	if (DISPLAY_VER(display) >= 12 && num_channels > qi.max_numchannels)
		drm_warn(display->drm, "Number of channels exceeds max number of channels.");
	if (qi.max_numchannels != 0)
		num_channels = min_t(u8, num_channels, qi.max_numchannels);

	dclk_max = icl_sagv_max_dclk(&qi);

	peakbw = num_channels * DIV_ROUND_UP(qi.channel_width, 8) * dclk_max;
	maxdebw = min(sa->deprogbwlimit * 1000, peakbw * DEPROGBWPCLIMIT / 100);

	ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);
	/*
	 * clperchgroup = 4kpagespermempage * clperchperblock,
	 * clperchperblock = 8 / num_channels * interleave
	 */
	clperchgroup = 4 * DIV_ROUND_UP(8, num_channels) * qi.deinterleave;

	for (i = 0; i < num_groups; i++) {
		struct intel_bw_info *bi = &display->bw.max[i];
		struct intel_bw_info *bi_next;
		int clpchgroup;
		int j;

		clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;

		if (i < num_groups - 1) {
			bi_next = &display->bw.max[i + 1];

			if (clpchgroup < clperchgroup)
				bi_next->num_planes = (ipqdepth - clpchgroup) /
						       clpchgroup + 1;
			else
				bi_next->num_planes = 0;
		}

		bi->num_qgv_points = qi.num_points;
		bi->num_psf_gv_points = qi.num_psf_points;

		for (j = 0; j < qi.num_points; j++) {
			const struct intel_qgv_point *sp = &qi.points[j];
			int ct, bw;

			/*
			 * Max row cycle time
			 *
			 * FIXME what is the logic behind the
			 * assumed burst length?
			 */
			ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
				   (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
			bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct);

			bi->deratedbw[j] = min(maxdebw,
					       bw * (100 - sa->derating) / 100);
			bi->peakbw[j] = DIV_ROUND_CLOSEST(sp->dclk *
							  num_channels *
							  qi.channel_width, 8);

			drm_dbg_kms(display->drm,
				    "BW%d / QGV %d: num_planes=%d deratedbw=%u peakbw: %u\n",
				    i, j, bi->num_planes, bi->deratedbw[j],
				    bi->peakbw[j]);
		}

		for (j = 0; j < qi.num_psf_points; j++) {
			const struct intel_psf_gv_point *sp = &qi.psf_points[j];

			bi->psf_bw[j] = adl_calc_psf_bw(sp->clk);

			drm_dbg_kms(display->drm,
				    "BW%d / PSF GV %d: num_planes=%d bw=%u\n",
				    i, j, bi->num_planes, bi->psf_bw[j]);
		}
	}

	/*
	 * If SAGV is disabled in BIOS, we always get 1
	 * SAGV point, but we can't send PCode commands to restrict it
	 * as they would fail and be pointless anyway.
	 */
	if (qi.num_points == 1)
		display->sagv.status = I915_SAGV_NOT_CONTROLLED;
	else
		display->sagv.status = I915_SAGV_ENABLED;

	return 0;
}

static void dg2_get_bw_info(struct intel_display *display)
{
	unsigned int deratedbw = display->platform.dg2_g11 ? 38000 : 50000;
	int num_groups = ARRAY_SIZE(display->bw.max);
	int i;

	/*
	 * DG2 doesn't have SAGV or QGV points, just a constant max bandwidth
	 * that doesn't depend on the number of planes enabled. So fill all the
	 * plane groups with constant bw information for uniformity with other
	 * platforms. DG2-G10 platforms have a constant 50 GB/s bandwidth,
	 * whereas DG2-G11 platforms have 38 GB/s.
	 */
	for (i = 0; i < num_groups; i++) {
		struct intel_bw_info *bi = &display->bw.max[i];

		bi->num_planes = 1;
		/* Need only one dummy QGV point per group */
		bi->num_qgv_points = 1;
		bi->deratedbw[0] = deratedbw;
	}

	display->sagv.status = I915_SAGV_NOT_CONTROLLED;
}

static int xe2_hpd_get_bw_info(struct intel_display *display,
			       const struct dram_info *dram_info,
			       const struct intel_sa_info *sa)
{
	struct intel_qgv_info qi = {};
	int num_channels = dram_info->num_channels;
	int peakbw, maxdebw;
	int ret, i;

	ret = icl_get_qgv_points(display, dram_info, &qi, true);
	if (ret) {
		drm_dbg_kms(display->drm,
			    "Failed to get memory subsystem information, ignoring bandwidth limits");
		return ret;
	}

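	/* raw peak in MB/s: channels * bytes per dclk cycle * max dclk in MHz */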
	peakbw = num_channels * qi.channel_width / 8 * icl_sagv_max_dclk(&qi);
	maxdebw = min(sa->deprogbwlimit * 1000, peakbw * DEPROGBWPCLIMIT / 100);

	for (i = 0; i < qi.num_points; i++) {
		const struct intel_qgv_point *point = &qi.points[i];
		int bw = num_channels * (qi.channel_width / 8) * point->dclk;

		display->bw.max[0].deratedbw[i] =
			min(maxdebw, (100 - sa->derating) * bw / 100);
		display->bw.max[0].peakbw[i] = bw;

		drm_dbg_kms(display->drm, "QGV %d: deratedbw=%u peakbw: %u\n",
			    i, display->bw.max[0].deratedbw[i],
			    display->bw.max[0].peakbw[i]);
	}

	/* Bandwidth does not depend on # of planes; set all groups the same */
	display->bw.max[0].num_planes = 1;
	display->bw.max[0].num_qgv_points = qi.num_points;
	for (i = 1; i < ARRAY_SIZE(display->bw.max); i++)
		memcpy(&display->bw.max[i], &display->bw.max[0],
		       sizeof(display->bw.max[0]));

	/*
	 * Xe2_HPD should always have exactly two QGV points representing
	 * battery and plugged-in operation.
	 */
	drm_WARN_ON(display->drm, qi.num_points != 2);
	display->sagv.status = I915_SAGV_ENABLED;

	return 0;
}

static unsigned int icl_max_bw_index(struct intel_display *display,
				     int num_planes, int qgv_point)
{
	int i;

	/*
	 * Let's return max bw for 0 planes
	 */
	num_planes = max(1, num_planes);

	for (i = 0; i < ARRAY_SIZE(display->bw.max); i++) {
		const struct intel_bw_info *bi =
			&display->bw.max[i];

		/*
		 * Pcode will not expose all QGV points when
		 * SAGV is forced to off/min/med/max.
		 */
		if (qgv_point >= bi->num_qgv_points)
			return UINT_MAX;

		if (num_planes >= bi->num_planes)
			return i;
	}

	return UINT_MAX;
}

static unsigned int tgl_max_bw_index(struct intel_display *display,
				     int num_planes, int qgv_point)
{
	int i;

	/*
	 * Let's return max bw for 0 planes
	 */
	num_planes = max(1, num_planes);

	for (i = ARRAY_SIZE(display->bw.max) - 1; i >= 0; i--) {
		const struct intel_bw_info *bi =
			&display->bw.max[i];

		/*
		 * Pcode will not expose all QGV points when
		 * SAGV is forced to off/min/med/max.
		 */
		if (qgv_point >= bi->num_qgv_points)
			return UINT_MAX;

		if (num_planes <= bi->num_planes)
			return i;
	}

	return 0;
}

static unsigned int adl_psf_bw(struct intel_display *display,
			       int psf_gv_point)
{
	const struct intel_bw_info *bi =
			&display->bw.max[0];

	return bi->psf_bw[psf_gv_point];
}

static unsigned int icl_qgv_bw(struct intel_display *display,
			       int num_active_planes, int qgv_point)
{
	unsigned int idx;

	if (DISPLAY_VER(display) >= 12)
		idx = tgl_max_bw_index(display, num_active_planes, qgv_point);
	else
		idx = icl_max_bw_index(display, num_active_planes, qgv_point);

	if (idx >= ARRAY_SIZE(display->bw.max))
		return 0;

	return display->bw.max[idx].deratedbw[qgv_point];
}

void intel_bw_init_hw(struct intel_display *display)
{
	const struct dram_info *dram_info = intel_dram_info(display->drm);

	if (!HAS_DISPLAY(display))
		return;

	if (DISPLAY_VERx100(display) >= 3002)
		tgl_get_bw_info(display, dram_info, &xe3lpd_3002_sa_info);
	else if (DISPLAY_VER(display) >= 30)
		tgl_get_bw_info(display, dram_info, &xe3lpd_sa_info);
	else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx &&
		 dram_info->type == INTEL_DRAM_GDDR_ECC)
		xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_ecc_sa_info);
	else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx)
		xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_sa_info);
	else if (DISPLAY_VER(display) >= 14)
		tgl_get_bw_info(display, dram_info, &mtl_sa_info);
	else if (display->platform.dg2)
		dg2_get_bw_info(display);
	else if (display->platform.alderlake_p)
		tgl_get_bw_info(display, dram_info, &adlp_sa_info);
	else if (display->platform.alderlake_s)
		tgl_get_bw_info(display, dram_info, &adls_sa_info);
	else if (display->platform.rocketlake)
		tgl_get_bw_info(display, dram_info, &rkl_sa_info);
	else if (DISPLAY_VER(display) == 12)
		tgl_get_bw_info(display, dram_info, &tgl_sa_info);
	else if (DISPLAY_VER(display) == 11)
		icl_get_bw_info(display, dram_info, &icl_sa_info);
}

static unsigned int intel_bw_num_active_planes(struct intel_display *display,
					       const struct intel_bw_state *bw_state)
{
	unsigned int num_active_planes = 0;
	enum pipe pipe;

	for_each_pipe(display, pipe)
		num_active_planes += bw_state->num_active_planes[pipe];

	return num_active_planes;
}

static unsigned int intel_bw_data_rate(struct intel_display *display,
				       const struct intel_bw_state *bw_state)
{
	unsigned int data_rate = 0;
	enum pipe pipe;

	for_each_pipe(display, pipe)
		data_rate += bw_state->data_rate[pipe];

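	/* display 13+: add a 5% overhead when VT-d / DMA remapping is active */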
	if (DISPLAY_VER(display) >= 13 && intel_display_vtd_active(display))
		data_rate = DIV_ROUND_UP(data_rate * 105, 100);

	return data_rate;
}

struct intel_bw_state *to_intel_bw_state(struct intel_global_state *obj_state)
{
	return container_of(obj_state, struct intel_bw_state, base);
}

struct intel_bw_state *
intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_global_state *bw_state;

	bw_state = intel_atomic_get_old_global_obj_state(state, &display->bw.obj);

	return to_intel_bw_state(bw_state);
}

struct intel_bw_state *
intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_global_state *bw_state;

	bw_state = intel_atomic_get_new_global_obj_state(state, &display->bw.obj);

	return to_intel_bw_state(bw_state);
}

struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	struct intel_global_state *bw_state;

	bw_state = intel_atomic_get_global_obj_state(state, &display->bw.obj);
	if (IS_ERR(bw_state))
		return ERR_CAST(bw_state);

	return to_intel_bw_state(bw_state);
}

static unsigned int icl_max_bw_qgv_point_mask(struct intel_display *display,
					      int num_active_planes)
{
	unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
	unsigned int max_bw_point = 0;
	unsigned int max_bw = 0;
	int i;

	for (i = 0; i < num_qgv_points; i++) {
		unsigned int max_data_rate =
			icl_qgv_bw(display, num_active_planes, i);

		/*
		 * We need to know which qgv point gives us
		 * maximum bandwidth in order to disable SAGV
		 * if we find that we exceed SAGV block time
		 * with watermarks. By that moment we already
		 * have those, as they are calculated earlier in
		 * intel_atomic_check.
		 */
		if (max_data_rate > max_bw) {
			max_bw_point = BIT(i);
			max_bw = max_data_rate;
		}
	}

	return max_bw_point;
}

static u16 icl_prepare_qgv_points_mask(struct intel_display *display,
				       unsigned int qgv_points,
				       unsigned int psf_points)
{
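	/* PCode takes the set of points to be masked, limited to the advertised ones */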
	return ~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
		 ADLS_PCODE_REQ_PSF_PT(psf_points)) & icl_qgv_points_mask(display);
}

static unsigned int icl_max_bw_psf_gv_point_mask(struct intel_display *display)
{
	unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
	unsigned int max_bw_point_mask = 0;
	unsigned int max_bw = 0;
	int i;

	for (i = 0; i < num_psf_gv_points; i++) {
		unsigned int max_data_rate = adl_psf_bw(display, i);

		if (max_data_rate > max_bw) {
			max_bw_point_mask = BIT(i);
			max_bw = max_data_rate;
		} else if (max_data_rate == max_bw) {
			max_bw_point_mask |= BIT(i);
		}
	}

	return max_bw_point_mask;
}

static void icl_force_disable_sagv(struct intel_display *display,
				   struct intel_bw_state *bw_state)
{
	unsigned int qgv_points = icl_max_bw_qgv_point_mask(display, 0);
	unsigned int psf_points = icl_max_bw_psf_gv_point_mask(display);

	bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display,
								qgv_points,
								psf_points);

	drm_dbg_kms(display->drm, "Forcing SAGV disable: mask 0x%x\n",
		    bw_state->qgv_points_mask);

	icl_pcode_restrict_qgv_points(display, bw_state->qgv_points_mask);
}

void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_bw_state *old_bw_state =
		intel_atomic_get_old_bw_state(state);
	const struct intel_bw_state *new_bw_state =
		intel_atomic_get_new_bw_state(state);
	u16 old_mask, new_mask;

	if (!new_bw_state)
		return;

	old_mask = old_bw_state->qgv_points_mask;
	new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;

	if (old_mask == new_mask)
		return;

	WARN_ON(!new_bw_state->base.changed);

	drm_dbg_kms(display->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
		    old_mask, new_mask);

	/*
	 * Restrict required qgv points before updating the configuration.
	 * According to BSpec we can't mask and unmask qgv points at the same
	 * time. Also masking should be done before updating the configuration
	 * and unmasking afterwards.
	 */
	icl_pcode_restrict_qgv_points(display, new_mask);
}

void icl_sagv_post_plane_update(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_bw_state *old_bw_state =
		intel_atomic_get_old_bw_state(state);
	const struct intel_bw_state *new_bw_state =
		intel_atomic_get_new_bw_state(state);
	u16 old_mask, new_mask;

	if (!new_bw_state)
		return;

	old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
	new_mask = new_bw_state->qgv_points_mask;

	if (old_mask == new_mask)
		return;

	WARN_ON(!new_bw_state->base.changed);

	drm_dbg_kms(display->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
		    old_mask, new_mask);

	/*
	 * Allow required qgv points after updating the configuration.
	 * According to BSpec we can't mask and unmask qgv points at the same
	 * time. Also masking should be done before updating the configuration
	 * and unmasking afterwards.
	 */
	icl_pcode_restrict_qgv_points(display, new_mask);
}

static int mtl_find_qgv_points(struct intel_display *display,
			       unsigned int data_rate,
			       unsigned int num_active_planes,
			       struct intel_bw_state *new_bw_state)
{
	unsigned int best_rate = UINT_MAX;
	unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
	unsigned int qgv_peak_bw  = 0;
	int i;
	int ret;

	ret = intel_atomic_lock_global_state(&new_bw_state->base);
	if (ret)
		return ret;

	/*
	 * If SAGV cannot be enabled, disable the pcode SAGV by passing all 1's
	 * for qgv peak bw in the PM Demand request. So assign U16_MAX if SAGV is
	 * not enabled. The PM Demand code will clamp the value for the register.
	 */
	if (!intel_bw_can_enable_sagv(display, new_bw_state)) {
		new_bw_state->qgv_point_peakbw = U16_MAX;
		drm_dbg_kms(display->drm, "No SAGV, use U16_MAX as peak bw.");
		return 0;
	}

	/*
	 * Find the best QGV point by comparing the data_rate with max data rate
	 * offered per plane group
	 */
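	/* i.e. pick the lowest-bandwidth QGV point that still satisfies data_rate */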
	for (i = 0; i < num_qgv_points; i++) {
		unsigned int bw_index =
			tgl_max_bw_index(display, num_active_planes, i);
		unsigned int max_data_rate;

		if (bw_index >= ARRAY_SIZE(display->bw.max))
			continue;

		max_data_rate = display->bw.max[bw_index].deratedbw[i];

		if (max_data_rate < data_rate)
			continue;

		if (max_data_rate - data_rate < best_rate) {
			best_rate = max_data_rate - data_rate;
			qgv_peak_bw = display->bw.max[bw_index].peakbw[i];
		}

		drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d qgv_peak_bw: %d\n",
			    i, max_data_rate, data_rate, qgv_peak_bw);
	}

	drm_dbg_kms(display->drm, "Matching peaks QGV bw: %d for required data rate: %d\n",
		    qgv_peak_bw, data_rate);

	/*
	 * The display configuration cannot be supported if no QGV point
	 * satisfying the required data rate is found
	 */
	if (qgv_peak_bw == 0) {
		drm_dbg_kms(display->drm, "No QGV points for bw %d for display configuration(%d active planes).\n",
			    data_rate, num_active_planes);
		return -EINVAL;
	}

	/* MTL PM DEMAND expects QGV BW parameter in multiples of 100 MB/s */
	new_bw_state->qgv_point_peakbw = DIV_ROUND_CLOSEST(qgv_peak_bw, 100);

	return 0;
}

static int icl_find_qgv_points(struct intel_display *display,
			       unsigned int data_rate,
			       unsigned int num_active_planes,
			       const struct intel_bw_state *old_bw_state,
			       struct intel_bw_state *new_bw_state)
{
	unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
	unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
	u16 psf_points = 0;
	u16 qgv_points = 0;
	int i;
	int ret;

	ret = intel_atomic_lock_global_state(&new_bw_state->base);
	if (ret)
		return ret;

	for (i = 0; i < num_qgv_points; i++) {
		unsigned int max_data_rate = icl_qgv_bw(display,
							num_active_planes, i);
		if (max_data_rate >= data_rate)
			qgv_points |= BIT(i);

		drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d\n",
			    i, max_data_rate, data_rate);
	}

	for (i = 0; i < num_psf_gv_points; i++) {
		unsigned int max_data_rate = adl_psf_bw(display, i);

		if (max_data_rate >= data_rate)
			psf_points |= BIT(i);

		drm_dbg_kms(display->drm, "PSF GV point %d: max bw %d"
			    " required %d\n",
			    i, max_data_rate, data_rate);
	}

	/*
	 * BSpec states that we should always have at least one allowed point
	 * left, so if we don't, simply reject the configuration for obvious
	 * reasons.
	 */
	if (qgv_points == 0) {
		drm_dbg_kms(display->drm, "No QGV points provide sufficient memory"
			    " bandwidth %d for display configuration(%d active planes).\n",
			    data_rate, num_active_planes);
		return -EINVAL;
	}

	if (num_psf_gv_points > 0 && psf_points == 0) {
		drm_dbg_kms(display->drm, "No PSF GV points provide sufficient memory"
			    " bandwidth %d for display configuration(%d active planes).\n",
			    data_rate, num_active_planes);
		return -EINVAL;
	}

	/*
	 * Leave only a single point with the highest bandwidth if
	 * we can't enable SAGV due to the increased memory latency it may
	 * cause.
	 */
	if (!intel_bw_can_enable_sagv(display, new_bw_state)) {
		qgv_points = icl_max_bw_qgv_point_mask(display, num_active_planes);
		drm_dbg_kms(display->drm, "No SAGV, using single QGV point mask 0x%x\n",
			    qgv_points);
	}

	/*
	 * We store the ones which need to be masked as that is what PCode
	 * actually accepts as a parameter.
	 */
	new_bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display,
								    qgv_points,
								    psf_points);
	/*
	 * If the actual mask has changed we need to make sure that
	 * the commits are serialized (in case this is a nomodeset, nonblocking
	 * commit).
	 */
	if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) {
		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
		if (ret)
			return ret;
	}

	return 0;
}

static int intel_bw_check_qgv_points(struct intel_display *display,
				     const struct intel_bw_state *old_bw_state,
				     struct intel_bw_state *new_bw_state)
{
	unsigned int data_rate = intel_bw_data_rate(display, new_bw_state);
	unsigned int num_active_planes =
			intel_bw_num_active_planes(display, new_bw_state);

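	/* data rate is presumably in kB/s; convert to MB/s to match deratedbw */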
	data_rate = DIV_ROUND_UP(data_rate, 1000);

	if (DISPLAY_VER(display) >= 14)
		return mtl_find_qgv_points(display, data_rate, num_active_planes,
					   new_bw_state);
	else
		return icl_find_qgv_points(display, data_rate, num_active_planes,
					   old_bw_state, new_bw_state);
}

static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		unsigned int old_data_rate =
			intel_crtc_bw_data_rate(old_crtc_state);
		unsigned int new_data_rate =
			intel_crtc_bw_data_rate(new_crtc_state);
		unsigned int old_active_planes =
			intel_crtc_bw_num_active_planes(old_crtc_state);
		unsigned int new_active_planes =
			intel_crtc_bw_num_active_planes(new_crtc_state);
		struct intel_bw_state *new_bw_state;

		/*
		 * Avoid locking the bw state when
		 * nothing significant has changed.
		 */
		if (old_data_rate == new_data_rate &&
		    old_active_planes == new_active_planes)
			continue;

		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		new_bw_state->data_rate[crtc->pipe] = new_data_rate;
		new_bw_state->num_active_planes[crtc->pipe] = new_active_planes;

		*changed = true;

		drm_dbg_kms(display->drm,
			    "[CRTC:%d:%s] data rate %u num active planes %u\n",
			    crtc->base.base.id, crtc->base.name,
			    new_bw_state->data_rate[crtc->pipe],
			    new_bw_state->num_active_planes[crtc->pipe]);
	}

	return 0;
}

static int intel_bw_modeset_checks(struct intel_atomic_state *state)
{
	const struct intel_bw_state *old_bw_state;
	struct intel_bw_state *new_bw_state;
	int ret;

	if (!intel_any_crtc_active_changed(state))
		return 0;

	new_bw_state = intel_atomic_get_bw_state(state);
	if (IS_ERR(new_bw_state))
		return PTR_ERR(new_bw_state);

	old_bw_state = intel_atomic_get_old_bw_state(state);

	new_bw_state->active_pipes =
		intel_calc_active_pipes(state, old_bw_state->active_pipes);

	ret = intel_atomic_lock_global_state(&new_bw_state->base);
	if (ret)
		return ret;

	return 0;
}

static int intel_bw_check_sagv_mask(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state;
	const struct intel_crtc_state *new_crtc_state;
	const struct intel_bw_state *old_bw_state = NULL;
	struct intel_bw_state *new_bw_state = NULL;
	struct intel_crtc *crtc;
	int ret, i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		if (intel_crtc_can_enable_sagv(old_crtc_state) ==
		    intel_crtc_can_enable_sagv(new_crtc_state))
			continue;

		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		old_bw_state = intel_atomic_get_old_bw_state(state);

		if (intel_crtc_can_enable_sagv(new_crtc_state))
			new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
		else
			new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
	}

	if (!new_bw_state)
		return 0;

	if (intel_bw_can_enable_sagv(display, new_bw_state) !=
	    intel_bw_can_enable_sagv(display, old_bw_state)) {
		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
		if (ret)
			return ret;
	} else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
		ret = intel_atomic_lock_global_state(&new_bw_state->base);
		if (ret)
			return ret;
	}

	return 0;
}

int intel_bw_atomic_check(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	bool changed = false;
	struct intel_bw_state *new_bw_state;
	const struct intel_bw_state *old_bw_state;
	int ret;

	if (DISPLAY_VER(display) < 9)
		return 0;

	ret = intel_bw_modeset_checks(state);
	if (ret)
		return ret;

	ret = intel_bw_check_sagv_mask(state);
	if (ret)
		return ret;

	/* FIXME earlier gens need some checks too */
	if (DISPLAY_VER(display) < 11)
		return 0;

	ret = intel_bw_check_data_rate(state, &changed);
	if (ret)
		return ret;

	old_bw_state = intel_atomic_get_old_bw_state(state);
	new_bw_state = intel_atomic_get_new_bw_state(state);

	if (new_bw_state &&
	    intel_bw_can_enable_sagv(display, old_bw_state) !=
	    intel_bw_can_enable_sagv(display, new_bw_state))
		changed = true;

	/*
	 * If none of our inputs (data rates, number of active
	 * planes, SAGV yes/no) changed then nothing to do here.
	 */
	if (!changed)
		return 0;

	ret = intel_bw_check_qgv_points(display, old_bw_state, new_bw_state);
	if (ret)
		return ret;

	return 0;
}

static void intel_bw_crtc_update(struct intel_bw_state *bw_state,
				 const struct intel_crtc_state *crtc_state)
{
	struct intel_display *display = to_intel_display(crtc_state);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);

	bw_state->data_rate[crtc->pipe] =
		intel_crtc_bw_data_rate(crtc_state);
	bw_state->num_active_planes[crtc->pipe] =
		intel_crtc_bw_num_active_planes(crtc_state);

	drm_dbg_kms(display->drm, "pipe %c data rate %u num active planes %u\n",
		    pipe_name(crtc->pipe),
		    bw_state->data_rate[crtc->pipe],
		    bw_state->num_active_planes[crtc->pipe]);
}

void intel_bw_update_hw_state(struct intel_display *display)
{
	struct intel_bw_state *bw_state =
		to_intel_bw_state(display->bw.obj.state);
	struct intel_crtc *crtc;

	if (DISPLAY_VER(display) < 9)
		return;

	bw_state->active_pipes = 0;
	bw_state->pipe_sagv_reject = 0;

	for_each_intel_crtc(display->drm, crtc) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		enum pipe pipe = crtc->pipe;

		if (crtc_state->hw.active)
			bw_state->active_pipes |= BIT(pipe);

		if (DISPLAY_VER(display) >= 11)
			intel_bw_crtc_update(bw_state, crtc_state);

		/* initially SAGV has been forced off */
		bw_state->pipe_sagv_reject |= BIT(pipe);
	}
}

void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc)
{
	struct intel_display *display = to_intel_display(crtc);
	struct intel_bw_state *bw_state =
		to_intel_bw_state(display->bw.obj.state);
	enum pipe pipe = crtc->pipe;

	if (DISPLAY_VER(display) < 9)
		return;

	bw_state->data_rate[pipe] = 0;
	bw_state->num_active_planes[pipe] = 0;
}

static struct intel_global_state *
intel_bw_duplicate_state(struct intel_global_obj *obj)
{
	struct intel_bw_state *state;

	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	return &state->base;
}

static void intel_bw_destroy_state(struct intel_global_obj *obj,
				   struct intel_global_state *state)
{
	kfree(state);
}

static const struct intel_global_state_funcs intel_bw_funcs = {
	.atomic_duplicate_state = intel_bw_duplicate_state,
	.atomic_destroy_state = intel_bw_destroy_state,
};

int intel_bw_init(struct intel_display *display)
{
	struct intel_bw_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	intel_atomic_global_obj_init(display, &display->bw.obj,
				     &state->base, &intel_bw_funcs);

	/*
	 * Do this only if we have SAGV. From display version 14 onwards,
	 * SAGV is handled through pmdemand requests instead.
	 */
	if (intel_has_sagv(display) && IS_DISPLAY_VER(display, 11, 13))
		icl_force_disable_sagv(display, state);

	return 0;
}

bool intel_bw_pmdemand_needs_update(struct intel_atomic_state *state)
{
	const struct intel_bw_state *new_bw_state, *old_bw_state;

	new_bw_state = intel_atomic_get_new_bw_state(state);
	old_bw_state = intel_atomic_get_old_bw_state(state);

	if (new_bw_state &&
	    new_bw_state->qgv_point_peakbw != old_bw_state->qgv_point_peakbw)
		return true;

	return false;
}

bool intel_bw_can_enable_sagv(struct intel_display *display,
			      const struct intel_bw_state *bw_state)
{
	if (DISPLAY_VER(display) < 11 &&
	    bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
		return false;

	return bw_state->pipe_sagv_reject == 0;
}

int intel_bw_qgv_point_peakbw(const struct intel_bw_state *bw_state)
{
	return bw_state->qgv_point_peakbw;
}
1511