xref: /linux/drivers/gpu/drm/i915/display/intel_bw.c (revision bf4afc53b77aeaa48b5409da5c8da6bb4eff7f43)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2019 Intel Corporation
4  */
5 
6 #include <drm/drm_atomic_state_helper.h>
7 #include <drm/drm_print.h>
8 
9 #include "i915_reg.h"
10 #include "intel_bw.h"
11 #include "intel_crtc.h"
12 #include "intel_display_core.h"
13 #include "intel_display_regs.h"
14 #include "intel_display_types.h"
15 #include "intel_display_utils.h"
16 #include "intel_dram.h"
17 #include "intel_mchbar_regs.h"
18 #include "intel_pcode.h"
19 #include "intel_uncore.h"
20 #include "skl_watermark.h"
21 
/*
 * Global (cross-pipe) display bandwidth state, tracked as part of the
 * atomic state via the embedded intel_global_state base.
 */
struct intel_bw_state {
	struct intel_global_state base;

	/*
	 * Contains a bit mask, used to determine, whether correspondent
	 * pipe allows SAGV or not.
	 */
	u8 pipe_sagv_reject;

	/* bitmask of active pipes */
	u8 active_pipes;

	/*
	 * From MTL onwards, to lock a QGV point, punit expects the peak BW of
	 * the selected QGV point as the parameter in multiples of 100MB/s
	 */
	u16 qgv_point_peakbw;

	/*
	 * Current QGV points mask, which restricts
	 * some particular SAGV states, not to confuse
	 * with pipe_sagv_mask.
	 */
	u16 qgv_points_mask;

	/* per-pipe data rate, summed up by intel_bw_data_rate() */
	unsigned int data_rate[I915_MAX_PIPES];
	/* per-pipe active plane count, summed up by intel_bw_num_active_planes() */
	u8 num_active_planes[I915_MAX_PIPES];
};
50 
/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
	/* DRAM clock plus the DRAM timing parameters (tRP/tRDPRE/tRC/tRAS/tRCD) */
	u16 dclk, t_rp, t_rdpre, t_rc, t_ras, t_rcd;
};
55 
/* Percentage of the peak memory bandwidth the display engine may consume */
#define DEPROGBWPCLIMIT		60
57 
/* A single PSF (pixel stream fabric) Geyserville point */
struct intel_psf_gv_point {
	u8 clk; /* clock in multiples of 16.6666 MHz */
};
61 
/*
 * All QGV/PSF GV points advertised by the memory subsystem, plus the
 * DRAM-type derived parameters needed by the bandwidth formulas.
 */
struct intel_qgv_info {
	struct intel_qgv_point points[I915_NUM_QGV_POINTS];
	struct intel_psf_gv_point psf_points[I915_NUM_PSF_GV_POINTS];
	u8 num_points;
	u8 num_psf_points;
	u8 t_bl;		/* DRAM burst length */
	u8 max_numchannels;	/* max usable memory channels for this DRAM type */
	u8 channel_width;	/* channel width in bits */
	u8 deinterleave;
};
72 
/*
 * Read QGV point parameters on DG1 directly from MCHBAR registers
 * instead of querying pcode.
 *
 * @point is unused here: DG1 exposes a single set of MCHBAR registers,
 * so every point reads the same values.
 *
 * Returns 0 on success, -EINVAL if the derived DRAM clock is 0.
 */
static int dg1_mchbar_read_qgv_point_info(struct intel_display *display,
					  struct intel_qgv_point *sp,
					  int point)
{
	struct intel_uncore *uncore = to_intel_uncore(display->drm);
	u32 dclk_ratio, dclk_reference;
	u32 val;

	val = intel_uncore_read(uncore, SA_PERF_STATUS_0_0_0_MCHBAR_PC);
	dclk_ratio = REG_FIELD_GET(DG1_QCLK_RATIO_MASK, val);
	if (val & DG1_QCLK_REFERENCE)
		dclk_reference = 6; /* 6 * 16.666 MHz = 100 MHz */
	else
		dclk_reference = 8; /* 8 * 16.666 MHz = 133 MHz */
	/* +500 rounds to the nearest MHz before the round-up division */
	sp->dclk = DIV_ROUND_UP((16667 * dclk_ratio * dclk_reference) + 500, 1000);

	val = intel_uncore_read(uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
	if (val & DG1_GEAR_TYPE)
		sp->dclk *= 2;

	if (sp->dclk == 0)
		return -EINVAL;

	val = intel_uncore_read(uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR);
	sp->t_rp = REG_FIELD_GET(DG1_DRAM_T_RP_MASK, val);
	sp->t_rdpre = REG_FIELD_GET(DG1_DRAM_T_RDPRE_MASK, val);

	val = intel_uncore_read(uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH);
	sp->t_rcd = REG_FIELD_GET(DG1_DRAM_T_RCD_MASK, val);
	sp->t_ras = REG_FIELD_GET(DG1_DRAM_T_RAS_MASK, val);

	/* tRC is not read from hardware; derive it from tRP + tRAS */
	sp->t_rc = sp->t_rp + sp->t_ras;

	return 0;
}
108 
/*
 * Query pcode for the parameters of QGV point @point (ICL+ path).
 * The reply is packed into two 32-bit words: dclk and tRP/tRCD in the
 * first, tRDPRE/tRAS in the second.
 *
 * Returns 0 on success or the negative error code from the pcode read.
 */
static int icl_pcode_read_qgv_point_info(struct intel_display *display,
					 struct intel_qgv_point *sp,
					 int point)
{
	u32 val = 0, val2 = 0;
	u16 dclk;
	int ret;

	ret = intel_pcode_read(display->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
			       ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
			       &val, &val2);
	if (ret)
		return ret;

	dclk = val & 0xffff;
	/* display 12+ rounds to the nearest MHz (+500) before rounding up */
	sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(display) >= 12 ? 500 : 0),
				1000);
	sp->t_rp = (val & 0xff0000) >> 16;
	sp->t_rcd = (val & 0xff000000) >> 24;

	sp->t_rdpre = val2 & 0xff;
	sp->t_ras = (val2 & 0xff00) >> 8;

	/* tRC is not reported by pcode; derive it from tRP + tRAS */
	sp->t_rc = sp->t_rp + sp->t_ras;

	return 0;
}
136 
/*
 * Query pcode for the PSF GV point clocks (ADL-S+). The reply packs one
 * 8-bit clock value per point into a single 32-bit word.
 *
 * Returns 0 on success or the negative error code from the pcode read.
 */
static int adls_pcode_read_psf_gv_point_info(struct intel_display *display,
					     struct intel_psf_gv_point *points)
{
	u32 val = 0;
	int ret;
	int i;

	ret = intel_pcode_read(display->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
			       ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
	if (ret)
		return ret;

	/* unpack one byte per PSF GV point, lowest byte first */
	for (i = 0; i < I915_NUM_PSF_GV_POINTS; i++) {
		points[i].clk = val & 0xff;
		val >>= 8;
	}

	return 0;
}
156 
icl_qgv_points_mask(struct intel_display * display)157 static u16 icl_qgv_points_mask(struct intel_display *display)
158 {
159 	unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
160 	unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
161 	u16 qgv_points = 0, psf_points = 0;
162 
163 	/*
164 	 * We can _not_ use the whole ADLS_QGV_PT_MASK here, as PCode rejects
165 	 * it with failure if we try masking any unadvertised points.
166 	 * So need to operate only with those returned from PCode.
167 	 */
168 	if (num_qgv_points > 0)
169 		qgv_points = GENMASK(num_qgv_points - 1, 0);
170 
171 	if (num_psf_gv_points > 0)
172 		psf_points = GENMASK(num_psf_gv_points - 1, 0);
173 
174 	return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points);
175 }
176 
is_sagv_enabled(struct intel_display * display,u16 points_mask)177 static bool is_sagv_enabled(struct intel_display *display, u16 points_mask)
178 {
179 	return !is_power_of_2(~points_mask & icl_qgv_points_mask(display) &
180 			      ICL_PCODE_REQ_QGV_PT_MASK);
181 }
182 
/*
 * Ask pcode to mask off the QGV/PSF GV points in @points_mask, retrying
 * until pcode reports a safe state. Also updates the cached SAGV status
 * based on how many QGV points remain enabled.
 *
 * Returns 0 on success (or on display 14+, where this request is not
 * used), negative error code if pcode rejects the request.
 */
static int icl_pcode_restrict_qgv_points(struct intel_display *display,
					 u32 points_mask)
{
	int ret;

	/* MTL+ locks QGV points via qgv_point_peakbw instead */
	if (DISPLAY_VER(display) >= 14)
		return 0;

	/* bspec says to keep retrying for at least 1 ms */
	ret = intel_pcode_request(display->drm, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
				  points_mask,
				  ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
				  ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
				  1);

	if (ret < 0) {
		drm_err(display->drm,
			"Failed to disable qgv points (0x%x) points: 0x%x\n",
			ret, points_mask);
		return ret;
	}

	display->sagv.status = is_sagv_enabled(display, points_mask) ?
		I915_SAGV_ENABLED : I915_SAGV_DISABLED;

	return 0;
}
210 
/*
 * Read QGV point parameters on MTL+ from the MEM_SS_INFO registers;
 * no pcode round trip needed.
 *
 * Always returns 0.
 */
static int mtl_read_qgv_point_info(struct intel_display *display,
				   struct intel_qgv_point *sp, int point)
{
	struct intel_uncore *uncore = to_intel_uncore(display->drm);
	u32 val, val2;
	u16 dclk;

	val = intel_uncore_read(uncore, MTL_MEM_SS_INFO_QGV_POINT_LOW(point));
	val2 = intel_uncore_read(uncore, MTL_MEM_SS_INFO_QGV_POINT_HIGH(point));
	dclk = REG_FIELD_GET(MTL_DCLK_MASK, val);
	/* dclk is in multiples of 16.667 MHz; convert to MHz */
	sp->dclk = DIV_ROUND_CLOSEST(16667 * dclk, 1000);
	sp->t_rp = REG_FIELD_GET(MTL_TRP_MASK, val);
	sp->t_rcd = REG_FIELD_GET(MTL_TRCD_MASK, val);

	sp->t_rdpre = REG_FIELD_GET(MTL_TRDPRE_MASK, val2);
	sp->t_ras = REG_FIELD_GET(MTL_TRAS_MASK, val2);

	/* tRC is not reported by hardware; derive it from tRP + tRAS */
	sp->t_rc = sp->t_rp + sp->t_ras;

	return 0;
}
232 
233 static int
intel_read_qgv_point_info(struct intel_display * display,struct intel_qgv_point * sp,int point)234 intel_read_qgv_point_info(struct intel_display *display,
235 			  struct intel_qgv_point *sp,
236 			  int point)
237 {
238 	if (DISPLAY_VER(display) >= 14)
239 		return mtl_read_qgv_point_info(display, sp, point);
240 	else if (display->platform.dg1)
241 		return dg1_mchbar_read_qgv_point_info(display, sp, point);
242 	else
243 		return icl_pcode_read_qgv_point_info(display, sp, point);
244 }
245 
/*
 * Populate @qi with the advertised QGV/PSF GV point tables and the
 * DRAM-type derived parameters (burst length, channel count/width,
 * deinterleave) used by the bandwidth formulas.
 *
 * Returns 0 on success, negative error code if a QGV point can't be
 * read or the DRAM type is unknown on display 14+.
 */
static int icl_get_qgv_points(struct intel_display *display,
			      const struct dram_info *dram_info,
			      struct intel_qgv_info *qi,
			      bool is_y_tile)
{
	int i, ret;

	qi->num_points = dram_info->num_qgv_points;
	qi->num_psf_points = dram_info->num_psf_gv_points;

	/* DRAM-type specific parameters; the values differ per display generation */
	if (DISPLAY_VER(display) >= 14) {
		switch (dram_info->type) {
		case INTEL_DRAM_DDR4:
			qi->t_bl = 4;
			qi->max_numchannels = 2;
			qi->channel_width = 64;
			qi->deinterleave = 2;
			break;
		case INTEL_DRAM_DDR5:
			qi->t_bl = 8;
			qi->max_numchannels = 4;
			qi->channel_width = 32;
			qi->deinterleave = 2;
			break;
		case INTEL_DRAM_LPDDR4:
		case INTEL_DRAM_LPDDR5:
			qi->t_bl = 16;
			qi->max_numchannels = 8;
			qi->channel_width = 16;
			qi->deinterleave = 4;
			break;
		case INTEL_DRAM_GDDR:
		case INTEL_DRAM_GDDR_ECC:
			/* only channel_width is used on the GDDR path */
			qi->channel_width = 32;
			break;
		default:
			MISSING_CASE(dram_info->type);
			return -EINVAL;
		}
	} else if (DISPLAY_VER(display) >= 12) {
		switch (dram_info->type) {
		case INTEL_DRAM_DDR4:
			qi->t_bl = is_y_tile ? 8 : 4;
			qi->max_numchannels = 2;
			qi->channel_width = 64;
			qi->deinterleave = is_y_tile ? 1 : 2;
			break;
		case INTEL_DRAM_DDR5:
			qi->t_bl = is_y_tile ? 16 : 8;
			qi->max_numchannels = 4;
			qi->channel_width = 32;
			qi->deinterleave = is_y_tile ? 1 : 2;
			break;
		case INTEL_DRAM_LPDDR4:
			/* RKL uses LPDDR5-like parameters except for these */
			if (display->platform.rocketlake) {
				qi->t_bl = 8;
				qi->max_numchannels = 4;
				qi->channel_width = 32;
				qi->deinterleave = 2;
				break;
			}
			fallthrough;
		case INTEL_DRAM_LPDDR5:
			qi->t_bl = 16;
			qi->max_numchannels = 8;
			qi->channel_width = 16;
			qi->deinterleave = is_y_tile ? 2 : 4;
			break;
		default:
			qi->t_bl = 16;
			qi->max_numchannels = 1;
			break;
		}
	} else if (DISPLAY_VER(display) == 11) {
		qi->t_bl = dram_info->type == INTEL_DRAM_DDR4 ? 4 : 8;
		qi->max_numchannels = 1;
	}

	/* clamp rather than overflow the points[] array */
	if (drm_WARN_ON(display->drm,
			qi->num_points > ARRAY_SIZE(qi->points)))
		qi->num_points = ARRAY_SIZE(qi->points);

	for (i = 0; i < qi->num_points; i++) {
		struct intel_qgv_point *sp = &qi->points[i];

		ret = intel_read_qgv_point_info(display, sp, i);
		if (ret) {
			drm_dbg_kms(display->drm, "Could not read QGV %d info\n", i);
			return ret;
		}

		drm_dbg_kms(display->drm,
			    "QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
			    i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
			    sp->t_rcd, sp->t_rc);
	}

	if (qi->num_psf_points > 0) {
		/* PSF point read failure is non-fatal; just drop the points */
		ret = adls_pcode_read_psf_gv_point_info(display, qi->psf_points);
		if (ret) {
			drm_err(display->drm, "Failed to read PSF point data; PSF points will not be considered in bandwidth calculations.\n");
			qi->num_psf_points = 0;
		}

		for (i = 0; i < qi->num_psf_points; i++)
			drm_dbg_kms(display->drm,
				    "PSF GV %d: CLK=%d\n",
				    i, qi->psf_points[i].clk);
	}

	return 0;
}
358 
/*
 * Convert a PSF GV clock (in multiples of 16.666 MHz, i.e. 100/6 MHz)
 * into bandwidth. Per BSpec: BW = 64 * clk * 16.666 MHz, which is
 * computed here as 64 * clk * 100 / 6.
 */
static int adl_calc_psf_bw(int clk)
{
	int scaled = 64 * clk * 100;

	return DIV_ROUND_CLOSEST(scaled, 6);
}
368 
icl_sagv_max_dclk(const struct intel_qgv_info * qi)369 static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
370 {
371 	u16 dclk = 0;
372 	int i;
373 
374 	for (i = 0; i < qi->num_points; i++)
375 		dclk = max(dclk, qi->points[i].dclk);
376 
377 	return dclk;
378 }
379 
/* Per-platform system agent parameters used by the bandwidth formulas */
struct intel_sa_info {
	u16 displayrtids;
	/* derating is a percentage; deprogbwlimit is in GB/s */
	u8 deburst, deprogbwlimit, derating;
};
384 
/* System agent parameters, one table per platform/display generation */
static const struct intel_sa_info icl_sa_info = {
	.deburst = 8,
	.deprogbwlimit = 25, /* GB/s */
	.displayrtids = 128,
	.derating = 10,
};

static const struct intel_sa_info tgl_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 34, /* GB/s */
	.displayrtids = 256,
	.derating = 10,
};

static const struct intel_sa_info rkl_sa_info = {
	.deburst = 8,
	.deprogbwlimit = 20, /* GB/s */
	.displayrtids = 128,
	.derating = 10,
};

static const struct intel_sa_info adls_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 38, /* GB/s */
	.displayrtids = 256,
	.derating = 10,
};

static const struct intel_sa_info adlp_sa_info = {
	.deburst = 16,
	.deprogbwlimit = 38, /* GB/s */
	.displayrtids = 256,
	.derating = 20,
};

static const struct intel_sa_info mtl_sa_info = {
	.deburst = 32,
	.deprogbwlimit = 38, /* GB/s */
	.displayrtids = 256,
	.derating = 10,
};

static const struct intel_sa_info xe2_hpd_sa_info = {
	.derating = 30,
	.deprogbwlimit = 53,
	/* Other values not used by simplified algorithm */
};

static const struct intel_sa_info xe2_hpd_ecc_sa_info = {
	.derating = 45,
	.deprogbwlimit = 53,
	/* Other values not used by simplified algorithm */
};

static const struct intel_sa_info xe3lpd_sa_info = {
	.deburst = 32,
	.deprogbwlimit = 65, /* GB/s */
	.displayrtids = 256,
	.derating = 10,
};

static const struct intel_sa_info xe3lpd_3002_sa_info = {
	.deburst = 32,
	.deprogbwlimit = 22, /* GB/s */
	.displayrtids = 256,
	.derating = 10,
};
452 
/*
 * Compute the per-group/per-QGV-point derated bandwidth limits for
 * ICL-class hardware and store them in display->bw.max[]. Also derives
 * the initial SAGV status from the number of QGV points.
 *
 * Returns 0 on success, negative error code if the memory subsystem
 * information can't be read (bandwidth limits are then ignored).
 */
static int icl_get_bw_info(struct intel_display *display,
			   const struct dram_info *dram_info,
			   const struct intel_sa_info *sa)
{
	struct intel_qgv_info qi = {};
	bool is_y_tile = true; /* assume y tile may be used */
	int num_channels = max_t(u8, 1, dram_info->num_channels);
	int ipqdepth, ipqdepthpch = 16;
	int dclk_max;
	int maxdebw;
	int num_groups = ARRAY_SIZE(display->bw.max);
	int i, ret;

	ret = icl_get_qgv_points(display, dram_info, &qi, is_y_tile);
	if (ret) {
		drm_dbg_kms(display->drm,
			    "Failed to get memory subsystem information, ignoring bandwidth limits");
		return ret;
	}

	dclk_max = icl_sagv_max_dclk(&qi);
	/* cap at 60% of the peak (dclk_max * 16 bytes/clk * 6/10) */
	maxdebw = min(sa->deprogbwlimit * 1000, dclk_max * 16 * 6 / 10);
	ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);
	qi.deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);

	for (i = 0; i < num_groups; i++) {
		struct intel_bw_info *bi = &display->bw.max[i];
		int clpchgroup;
		int j;

		/* cachelines per channel group doubles with each group */
		clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;
		bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;

		bi->num_qgv_points = qi.num_points;
		bi->num_psf_gv_points = qi.num_psf_points;

		for (j = 0; j < qi.num_points; j++) {
			const struct intel_qgv_point *sp = &qi.points[j];
			int ct, bw;

			/*
			 * Max row cycle time
			 *
			 * FIXME what is the logic behind the
			 * assumed burst length?
			 */
			ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
				   (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
			bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct);

			/* derate and clamp against the programmed BW limit */
			bi->deratedbw[j] = min(maxdebw,
					       bw * (100 - sa->derating) / 100);

			drm_dbg_kms(display->drm,
				    "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
				    i, j, bi->num_planes, bi->deratedbw[j]);
		}
	}
	/*
	 * In case if SAGV is disabled in BIOS, we always get 1
	 * SAGV point, but we can't send PCode commands to restrict it
	 * as it will fail and pointless anyway.
	 */
	if (qi.num_points == 1)
		display->sagv.status = I915_SAGV_NOT_CONTROLLED;
	else
		display->sagv.status = I915_SAGV_ENABLED;

	return 0;
}
523 
/*
 * Compute the per-group derated/peak bandwidth limits for TGL-class
 * (display 12+) hardware and store them in display->bw.max[], including
 * the PSF GV point bandwidths. Also derives the initial SAGV status.
 *
 * Returns 0 on success, negative error code if the memory subsystem
 * information can't be read (bandwidth limits are then ignored).
 */
static int tgl_get_bw_info(struct intel_display *display,
			   const struct dram_info *dram_info,
			   const struct intel_sa_info *sa)
{
	struct intel_qgv_info qi = {};
	bool is_y_tile = true; /* assume y tile may be used */
	int num_channels = max_t(u8, 1, dram_info->num_channels);
	int ipqdepth, ipqdepthpch = 16;
	int dclk_max;
	int maxdebw, peakbw;
	int clperchgroup;
	int num_groups = ARRAY_SIZE(display->bw.max);
	int i, ret;

	ret = icl_get_qgv_points(display, dram_info, &qi, is_y_tile);
	if (ret) {
		drm_dbg_kms(display->drm,
			    "Failed to get memory subsystem information, ignoring bandwidth limits");
		return ret;
	}

	/* pre-MTL LPDDR counts each channel as two for BW purposes */
	if (DISPLAY_VER(display) < 14 &&
	    (dram_info->type == INTEL_DRAM_LPDDR4 || dram_info->type == INTEL_DRAM_LPDDR5))
		num_channels *= 2;

	/* keep a deinterleave value already set by icl_get_qgv_points() */
	qi.deinterleave = qi.deinterleave ? : DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2);

	if (num_channels < qi.max_numchannels && DISPLAY_VER(display) >= 12)
		qi.deinterleave = max(DIV_ROUND_UP(qi.deinterleave, 2), 1);

	if (DISPLAY_VER(display) >= 12 && num_channels > qi.max_numchannels)
		drm_warn(display->drm, "Number of channels exceeds max number of channels.");
	if (qi.max_numchannels != 0)
		num_channels = min_t(u8, num_channels, qi.max_numchannels);

	dclk_max = icl_sagv_max_dclk(&qi);

	/* cap at DEPROGBWPCLIMIT percent of the peak bandwidth */
	peakbw = num_channels * DIV_ROUND_UP(qi.channel_width, 8) * dclk_max;
	maxdebw = min(sa->deprogbwlimit * 1000, peakbw * DEPROGBWPCLIMIT / 100);

	ipqdepth = min(ipqdepthpch, sa->displayrtids / num_channels);
	/*
	 * clperchgroup = 4kpagespermempage * clperchperblock,
	 * clperchperblock = 8 / num_channels * interleave
	 */
	clperchgroup = 4 * DIV_ROUND_UP(8, num_channels) * qi.deinterleave;

	for (i = 0; i < num_groups; i++) {
		struct intel_bw_info *bi = &display->bw.max[i];
		struct intel_bw_info *bi_next;
		int clpchgroup;
		int j;

		clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i;

		/* num_planes for group i+1 is derived from group i's clpchgroup */
		if (i < num_groups - 1) {
			bi_next = &display->bw.max[i + 1];

			if (clpchgroup < clperchgroup)
				bi_next->num_planes = (ipqdepth - clpchgroup) /
						       clpchgroup + 1;
			else
				bi_next->num_planes = 0;
		}

		bi->num_qgv_points = qi.num_points;
		bi->num_psf_gv_points = qi.num_psf_points;

		for (j = 0; j < qi.num_points; j++) {
			const struct intel_qgv_point *sp = &qi.points[j];
			int ct, bw;

			/*
			 * Max row cycle time
			 *
			 * FIXME what is the logic behind the
			 * assumed burst length?
			 */
			ct = max_t(int, sp->t_rc, sp->t_rp + sp->t_rcd +
				   (clpchgroup - 1) * qi.t_bl + sp->t_rdpre);
			bw = DIV_ROUND_UP(sp->dclk * clpchgroup * 32 * num_channels, ct);

			bi->deratedbw[j] = min(maxdebw,
					       bw * (100 - sa->derating) / 100);
			bi->peakbw[j] = DIV_ROUND_CLOSEST(sp->dclk *
							  num_channels *
							  qi.channel_width, 8);

			drm_dbg_kms(display->drm,
				    "BW%d / QGV %d: num_planes=%d deratedbw=%u peakbw: %u\n",
				    i, j, bi->num_planes, bi->deratedbw[j],
				    bi->peakbw[j]);
		}

		for (j = 0; j < qi.num_psf_points; j++) {
			const struct intel_psf_gv_point *sp = &qi.psf_points[j];

			bi->psf_bw[j] = adl_calc_psf_bw(sp->clk);

			drm_dbg_kms(display->drm,
				    "BW%d / PSF GV %d: num_planes=%d bw=%u\n",
				    i, j, bi->num_planes, bi->psf_bw[j]);
		}
	}

	/*
	 * In case if SAGV is disabled in BIOS, we always get 1
	 * SAGV point, but we can't send PCode commands to restrict it
	 * as it will fail and pointless anyway.
	 */
	if (qi.num_points == 1)
		display->sagv.status = I915_SAGV_NOT_CONTROLLED;
	else
		display->sagv.status = I915_SAGV_ENABLED;

	return 0;
}
641 
dg2_get_bw_info(struct intel_display * display)642 static void dg2_get_bw_info(struct intel_display *display)
643 {
644 	unsigned int deratedbw = display->platform.dg2_g11 ? 38000 : 50000;
645 	int num_groups = ARRAY_SIZE(display->bw.max);
646 	int i;
647 
648 	/*
649 	 * DG2 doesn't have SAGV or QGV points, just a constant max bandwidth
650 	 * that doesn't depend on the number of planes enabled. So fill all the
651 	 * plane group with constant bw information for uniformity with other
652 	 * platforms. DG2-G10 platforms have a constant 50 GB/s bandwidth,
653 	 * whereas DG2-G11 platforms have 38 GB/s.
654 	 */
655 	for (i = 0; i < num_groups; i++) {
656 		struct intel_bw_info *bi = &display->bw.max[i];
657 
658 		bi->num_planes = 1;
659 		/* Need only one dummy QGV point per group */
660 		bi->num_qgv_points = 1;
661 		bi->deratedbw[0] = deratedbw;
662 	}
663 
664 	display->sagv.status = I915_SAGV_NOT_CONTROLLED;
665 }
666 
xe2_hpd_get_bw_info(struct intel_display * display,const struct dram_info * dram_info,const struct intel_sa_info * sa)667 static int xe2_hpd_get_bw_info(struct intel_display *display,
668 			       const struct dram_info *dram_info,
669 			       const struct intel_sa_info *sa)
670 {
671 	struct intel_qgv_info qi = {};
672 	int num_channels = dram_info->num_channels;
673 	int peakbw, maxdebw;
674 	int ret, i;
675 
676 	ret = icl_get_qgv_points(display, dram_info, &qi, true);
677 	if (ret) {
678 		drm_dbg_kms(display->drm,
679 			    "Failed to get memory subsystem information, ignoring bandwidth limits");
680 		return ret;
681 	}
682 
683 	peakbw = num_channels * qi.channel_width / 8 * icl_sagv_max_dclk(&qi);
684 	maxdebw = min(sa->deprogbwlimit * 1000, peakbw * DEPROGBWPCLIMIT / 10);
685 
686 	for (i = 0; i < qi.num_points; i++) {
687 		const struct intel_qgv_point *point = &qi.points[i];
688 		int bw = num_channels * (qi.channel_width / 8) * point->dclk;
689 
690 		display->bw.max[0].deratedbw[i] =
691 			min(maxdebw, (100 - sa->derating) * bw / 100);
692 		display->bw.max[0].peakbw[i] = bw;
693 
694 		drm_dbg_kms(display->drm, "QGV %d: deratedbw=%u peakbw: %u\n",
695 			    i, display->bw.max[0].deratedbw[i],
696 			    display->bw.max[0].peakbw[i]);
697 	}
698 
699 	/* Bandwidth does not depend on # of planes; set all groups the same */
700 	display->bw.max[0].num_planes = 1;
701 	display->bw.max[0].num_qgv_points = qi.num_points;
702 	for (i = 1; i < ARRAY_SIZE(display->bw.max); i++)
703 		memcpy(&display->bw.max[i], &display->bw.max[0],
704 		       sizeof(display->bw.max[0]));
705 
706 	/*
707 	 * Xe2_HPD should always have exactly two QGV points representing
708 	 * battery and plugged-in operation.
709 	 */
710 	drm_WARN_ON(display->drm, qi.num_points != 2);
711 	display->sagv.status = I915_SAGV_ENABLED;
712 
713 	return 0;
714 }
715 
icl_max_bw_index(struct intel_display * display,int num_planes,int qgv_point)716 static unsigned int icl_max_bw_index(struct intel_display *display,
717 				     int num_planes, int qgv_point)
718 {
719 	int i;
720 
721 	/*
722 	 * Let's return max bw for 0 planes
723 	 */
724 	num_planes = max(1, num_planes);
725 
726 	for (i = 0; i < ARRAY_SIZE(display->bw.max); i++) {
727 		const struct intel_bw_info *bi =
728 			&display->bw.max[i];
729 
730 		/*
731 		 * Pcode will not expose all QGV points when
732 		 * SAGV is forced to off/min/med/max.
733 		 */
734 		if (qgv_point >= bi->num_qgv_points)
735 			return UINT_MAX;
736 
737 		if (num_planes >= bi->num_planes)
738 			return i;
739 	}
740 
741 	return UINT_MAX;
742 }
743 
tgl_max_bw_index(struct intel_display * display,int num_planes,int qgv_point)744 static unsigned int tgl_max_bw_index(struct intel_display *display,
745 				     int num_planes, int qgv_point)
746 {
747 	int i;
748 
749 	/*
750 	 * Let's return max bw for 0 planes
751 	 */
752 	num_planes = max(1, num_planes);
753 
754 	for (i = ARRAY_SIZE(display->bw.max) - 1; i >= 0; i--) {
755 		const struct intel_bw_info *bi =
756 			&display->bw.max[i];
757 
758 		/*
759 		 * Pcode will not expose all QGV points when
760 		 * SAGV is forced to off/min/med/max.
761 		 */
762 		if (qgv_point >= bi->num_qgv_points)
763 			return UINT_MAX;
764 
765 		if (num_planes <= bi->num_planes)
766 			return i;
767 	}
768 
769 	return 0;
770 }
771 
adl_psf_bw(struct intel_display * display,int psf_gv_point)772 static unsigned int adl_psf_bw(struct intel_display *display,
773 			       int psf_gv_point)
774 {
775 	const struct intel_bw_info *bi =
776 			&display->bw.max[0];
777 
778 	return bi->psf_bw[psf_gv_point];
779 }
780 
icl_qgv_bw(struct intel_display * display,int num_active_planes,int qgv_point)781 static unsigned int icl_qgv_bw(struct intel_display *display,
782 			       int num_active_planes, int qgv_point)
783 {
784 	unsigned int idx;
785 
786 	if (DISPLAY_VER(display) >= 12)
787 		idx = tgl_max_bw_index(display, num_active_planes, qgv_point);
788 	else
789 		idx = icl_max_bw_index(display, num_active_planes, qgv_point);
790 
791 	if (idx >= ARRAY_SIZE(display->bw.max))
792 		return 0;
793 
794 	return display->bw.max[idx].deratedbw[qgv_point];
795 }
796 
/*
 * One-time init: fill display->bw.max[] with the platform's bandwidth
 * limits using the appropriate per-generation algorithm and parameter
 * table. No-op when the device has no display.
 */
void intel_bw_init_hw(struct intel_display *display)
{
	const struct dram_info *dram_info = intel_dram_info(display);

	if (!HAS_DISPLAY(display))
		return;

	/*
	 * Starting with Xe3p_LPD, the hardware tells us whether memory has ECC
	 * enabled that would impact display bandwidth.  However, so far there
	 * are no instructions in Bspec on how to handle that case.  Let's
	 * complain if we ever find such a scenario.
	 */
	if (DISPLAY_VER(display) >= 35)
		drm_WARN_ON(display->drm, dram_info->ecc_impacting_de_bw);

	/* ordered from newest to oldest platform */
	if (DISPLAY_VER(display) >= 30) {
		if (DISPLAY_VERx100(display) == 3002)
			tgl_get_bw_info(display, dram_info, &xe3lpd_3002_sa_info);
		else
			tgl_get_bw_info(display, dram_info, &xe3lpd_sa_info);
	} else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx) {
		if (dram_info->type == INTEL_DRAM_GDDR_ECC)
			xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_ecc_sa_info);
		else
			xe2_hpd_get_bw_info(display, dram_info, &xe2_hpd_sa_info);
	} else if (DISPLAY_VER(display) >= 14) {
		tgl_get_bw_info(display, dram_info, &mtl_sa_info);
	} else if (display->platform.dg2) {
		dg2_get_bw_info(display);
	} else if (display->platform.alderlake_p) {
		tgl_get_bw_info(display, dram_info, &adlp_sa_info);
	} else if (display->platform.alderlake_s) {
		tgl_get_bw_info(display, dram_info, &adls_sa_info);
	} else if (display->platform.rocketlake) {
		tgl_get_bw_info(display, dram_info, &rkl_sa_info);
	} else if (DISPLAY_VER(display) == 12) {
		tgl_get_bw_info(display, dram_info, &tgl_sa_info);
	} else if (DISPLAY_VER(display) == 11) {
		icl_get_bw_info(display, dram_info, &icl_sa_info);
	}
}
839 
intel_bw_num_active_planes(struct intel_display * display,const struct intel_bw_state * bw_state)840 static unsigned int intel_bw_num_active_planes(struct intel_display *display,
841 					       const struct intel_bw_state *bw_state)
842 {
843 	unsigned int num_active_planes = 0;
844 	enum pipe pipe;
845 
846 	for_each_pipe(display, pipe)
847 		num_active_planes += bw_state->num_active_planes[pipe];
848 
849 	return num_active_planes;
850 }
851 
intel_bw_data_rate(struct intel_display * display,const struct intel_bw_state * bw_state)852 static unsigned int intel_bw_data_rate(struct intel_display *display,
853 				       const struct intel_bw_state *bw_state)
854 {
855 	unsigned int data_rate = 0;
856 	enum pipe pipe;
857 
858 	for_each_pipe(display, pipe)
859 		data_rate += bw_state->data_rate[pipe];
860 
861 	if (DISPLAY_VER(display) >= 13 && intel_display_vtd_active(display))
862 		data_rate = DIV_ROUND_UP(data_rate * 105, 100);
863 
864 	return data_rate;
865 }
866 
/* Downcast a global obj state to the bandwidth state wrapping it. */
struct intel_bw_state *to_intel_bw_state(struct intel_global_state *obj_state)
{
	return container_of(obj_state, struct intel_bw_state, base);
}
871 
872 struct intel_bw_state *
intel_atomic_get_old_bw_state(struct intel_atomic_state * state)873 intel_atomic_get_old_bw_state(struct intel_atomic_state *state)
874 {
875 	struct intel_display *display = to_intel_display(state);
876 	struct intel_global_state *bw_state;
877 
878 	bw_state = intel_atomic_get_old_global_obj_state(state, &display->bw.obj);
879 
880 	return to_intel_bw_state(bw_state);
881 }
882 
883 struct intel_bw_state *
intel_atomic_get_new_bw_state(struct intel_atomic_state * state)884 intel_atomic_get_new_bw_state(struct intel_atomic_state *state)
885 {
886 	struct intel_display *display = to_intel_display(state);
887 	struct intel_global_state *bw_state;
888 
889 	bw_state = intel_atomic_get_new_global_obj_state(state, &display->bw.obj);
890 
891 	return to_intel_bw_state(bw_state);
892 }
893 
894 struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state * state)895 intel_atomic_get_bw_state(struct intel_atomic_state *state)
896 {
897 	struct intel_display *display = to_intel_display(state);
898 	struct intel_global_state *bw_state;
899 
900 	bw_state = intel_atomic_get_global_obj_state(state, &display->bw.obj);
901 	if (IS_ERR(bw_state))
902 		return ERR_CAST(bw_state);
903 
904 	return to_intel_bw_state(bw_state);
905 }
906 
icl_max_bw_qgv_point_mask(struct intel_display * display,int num_active_planes)907 static unsigned int icl_max_bw_qgv_point_mask(struct intel_display *display,
908 					      int num_active_planes)
909 {
910 	unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
911 	unsigned int max_bw_point = 0;
912 	unsigned int max_bw = 0;
913 	int i;
914 
915 	for (i = 0; i < num_qgv_points; i++) {
916 		unsigned int max_data_rate =
917 			icl_qgv_bw(display, num_active_planes, i);
918 
919 		/*
920 		 * We need to know which qgv point gives us
921 		 * maximum bandwidth in order to disable SAGV
922 		 * if we find that we exceed SAGV block time
923 		 * with watermarks. By that moment we already
924 		 * have those, as it is calculated earlier in
925 		 * intel_atomic_check,
926 		 */
927 		if (max_data_rate > max_bw) {
928 			max_bw_point = BIT(i);
929 			max_bw = max_data_rate;
930 		}
931 	}
932 
933 	return max_bw_point;
934 }
935 
icl_prepare_qgv_points_mask(struct intel_display * display,unsigned int qgv_points,unsigned int psf_points)936 static u16 icl_prepare_qgv_points_mask(struct intel_display *display,
937 				       unsigned int qgv_points,
938 				       unsigned int psf_points)
939 {
940 	return ~(ICL_PCODE_REQ_QGV_PT(qgv_points) |
941 		 ADLS_PCODE_REQ_PSF_PT(psf_points)) & icl_qgv_points_mask(display);
942 }
943 
icl_max_bw_psf_gv_point_mask(struct intel_display * display)944 static unsigned int icl_max_bw_psf_gv_point_mask(struct intel_display *display)
945 {
946 	unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
947 	unsigned int max_bw_point_mask = 0;
948 	unsigned int max_bw = 0;
949 	int i;
950 
951 	for (i = 0; i < num_psf_gv_points; i++) {
952 		unsigned int max_data_rate = adl_psf_bw(display, i);
953 
954 		if (max_data_rate > max_bw) {
955 			max_bw_point_mask = BIT(i);
956 			max_bw = max_data_rate;
957 		} else if (max_data_rate == max_bw) {
958 			max_bw_point_mask |= BIT(i);
959 		}
960 	}
961 
962 	return max_bw_point_mask;
963 }
964 
icl_force_disable_sagv(struct intel_display * display,struct intel_bw_state * bw_state)965 static void icl_force_disable_sagv(struct intel_display *display,
966 				   struct intel_bw_state *bw_state)
967 {
968 	unsigned int qgv_points = icl_max_bw_qgv_point_mask(display, 0);
969 	unsigned int psf_points = icl_max_bw_psf_gv_point_mask(display);
970 
971 	bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display,
972 								qgv_points,
973 								psf_points);
974 
975 	drm_dbg_kms(display->drm, "Forcing SAGV disable: mask 0x%x\n",
976 		    bw_state->qgv_points_mask);
977 
978 	icl_pcode_restrict_qgv_points(display, bw_state->qgv_points_mask);
979 }
980 
icl_sagv_pre_plane_update(struct intel_atomic_state * state)981 void icl_sagv_pre_plane_update(struct intel_atomic_state *state)
982 {
983 	struct intel_display *display = to_intel_display(state);
984 	const struct intel_bw_state *old_bw_state =
985 		intel_atomic_get_old_bw_state(state);
986 	const struct intel_bw_state *new_bw_state =
987 		intel_atomic_get_new_bw_state(state);
988 	u16 old_mask, new_mask;
989 
990 	if (!new_bw_state)
991 		return;
992 
993 	old_mask = old_bw_state->qgv_points_mask;
994 	new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
995 
996 	if (old_mask == new_mask)
997 		return;
998 
999 	WARN_ON(!new_bw_state->base.changed);
1000 
1001 	drm_dbg_kms(display->drm, "Restricting QGV points: 0x%x -> 0x%x\n",
1002 		    old_mask, new_mask);
1003 
1004 	/*
1005 	 * Restrict required qgv points before updating the configuration.
1006 	 * According to BSpec we can't mask and unmask qgv points at the same
1007 	 * time. Also masking should be done before updating the configuration
1008 	 * and unmasking afterwards.
1009 	 */
1010 	icl_pcode_restrict_qgv_points(display, new_mask);
1011 }
1012 
icl_sagv_post_plane_update(struct intel_atomic_state * state)1013 void icl_sagv_post_plane_update(struct intel_atomic_state *state)
1014 {
1015 	struct intel_display *display = to_intel_display(state);
1016 	const struct intel_bw_state *old_bw_state =
1017 		intel_atomic_get_old_bw_state(state);
1018 	const struct intel_bw_state *new_bw_state =
1019 		intel_atomic_get_new_bw_state(state);
1020 	u16 old_mask, new_mask;
1021 
1022 	if (!new_bw_state)
1023 		return;
1024 
1025 	old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask;
1026 	new_mask = new_bw_state->qgv_points_mask;
1027 
1028 	if (old_mask == new_mask)
1029 		return;
1030 
1031 	WARN_ON(!new_bw_state->base.changed);
1032 
1033 	drm_dbg_kms(display->drm, "Relaxing QGV points: 0x%x -> 0x%x\n",
1034 		    old_mask, new_mask);
1035 
1036 	/*
1037 	 * Allow required qgv points after updating the configuration.
1038 	 * According to BSpec we can't mask and unmask qgv points at the same
1039 	 * time. Also masking should be done before updating the configuration
1040 	 * and unmasking afterwards.
1041 	 */
1042 	icl_pcode_restrict_qgv_points(display, new_mask);
1043 }
1044 
mtl_find_qgv_points(struct intel_display * display,unsigned int data_rate,unsigned int num_active_planes,struct intel_bw_state * new_bw_state)1045 static int mtl_find_qgv_points(struct intel_display *display,
1046 			       unsigned int data_rate,
1047 			       unsigned int num_active_planes,
1048 			       struct intel_bw_state *new_bw_state)
1049 {
1050 	unsigned int best_rate = UINT_MAX;
1051 	unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
1052 	unsigned int qgv_peak_bw  = 0;
1053 	int i;
1054 	int ret;
1055 
1056 	ret = intel_atomic_lock_global_state(&new_bw_state->base);
1057 	if (ret)
1058 		return ret;
1059 
1060 	/*
1061 	 * If SAGV cannot be enabled, disable the pcode SAGV by passing all 1's
1062 	 * for qgv peak bw in PM Demand request. So assign UINT_MAX if SAGV is
1063 	 * not enabled. PM Demand code will clamp the value for the register
1064 	 */
1065 	if (!intel_bw_can_enable_sagv(display, new_bw_state)) {
1066 		new_bw_state->qgv_point_peakbw = U16_MAX;
1067 		drm_dbg_kms(display->drm, "No SAGV, use UINT_MAX as peak bw.");
1068 		return 0;
1069 	}
1070 
1071 	/*
1072 	 * Find the best QGV point by comparing the data_rate with max data rate
1073 	 * offered per plane group
1074 	 */
1075 	for (i = 0; i < num_qgv_points; i++) {
1076 		unsigned int bw_index =
1077 			tgl_max_bw_index(display, num_active_planes, i);
1078 		unsigned int max_data_rate;
1079 
1080 		if (bw_index >= ARRAY_SIZE(display->bw.max))
1081 			continue;
1082 
1083 		max_data_rate = display->bw.max[bw_index].deratedbw[i];
1084 
1085 		if (max_data_rate < data_rate)
1086 			continue;
1087 
1088 		if (max_data_rate - data_rate < best_rate) {
1089 			best_rate = max_data_rate - data_rate;
1090 			qgv_peak_bw = display->bw.max[bw_index].peakbw[i];
1091 		}
1092 
1093 		drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d qgv_peak_bw: %d\n",
1094 			    i, max_data_rate, data_rate, qgv_peak_bw);
1095 	}
1096 
1097 	drm_dbg_kms(display->drm, "Matching peaks QGV bw: %d for required data rate: %d\n",
1098 		    qgv_peak_bw, data_rate);
1099 
1100 	/*
1101 	 * The display configuration cannot be supported if no QGV point
1102 	 * satisfying the required data rate is found
1103 	 */
1104 	if (qgv_peak_bw == 0) {
1105 		drm_dbg_kms(display->drm, "No QGV points for bw %d for display configuration(%d active planes).\n",
1106 			    data_rate, num_active_planes);
1107 		return -EINVAL;
1108 	}
1109 
1110 	/* MTL PM DEMAND expects QGV BW parameter in multiples of 100 mbps */
1111 	new_bw_state->qgv_point_peakbw = DIV_ROUND_CLOSEST(qgv_peak_bw, 100);
1112 
1113 	return 0;
1114 }
1115 
icl_find_qgv_points(struct intel_display * display,unsigned int data_rate,unsigned int num_active_planes,const struct intel_bw_state * old_bw_state,struct intel_bw_state * new_bw_state)1116 static int icl_find_qgv_points(struct intel_display *display,
1117 			       unsigned int data_rate,
1118 			       unsigned int num_active_planes,
1119 			       const struct intel_bw_state *old_bw_state,
1120 			       struct intel_bw_state *new_bw_state)
1121 {
1122 	unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points;
1123 	unsigned int num_qgv_points = display->bw.max[0].num_qgv_points;
1124 	u16 psf_points = 0;
1125 	u16 qgv_points = 0;
1126 	int i;
1127 	int ret;
1128 
1129 	ret = intel_atomic_lock_global_state(&new_bw_state->base);
1130 	if (ret)
1131 		return ret;
1132 
1133 	for (i = 0; i < num_qgv_points; i++) {
1134 		unsigned int max_data_rate = icl_qgv_bw(display,
1135 							num_active_planes, i);
1136 		if (max_data_rate >= data_rate)
1137 			qgv_points |= BIT(i);
1138 
1139 		drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d\n",
1140 			    i, max_data_rate, data_rate);
1141 	}
1142 
1143 	for (i = 0; i < num_psf_gv_points; i++) {
1144 		unsigned int max_data_rate = adl_psf_bw(display, i);
1145 
1146 		if (max_data_rate >= data_rate)
1147 			psf_points |= BIT(i);
1148 
1149 		drm_dbg_kms(display->drm, "PSF GV point %d: max bw %d"
1150 			    " required %d\n",
1151 			    i, max_data_rate, data_rate);
1152 	}
1153 
1154 	/*
1155 	 * BSpec states that we always should have at least one allowed point
1156 	 * left, so if we couldn't - simply reject the configuration for obvious
1157 	 * reasons.
1158 	 */
1159 	if (qgv_points == 0) {
1160 		drm_dbg_kms(display->drm, "No QGV points provide sufficient memory"
1161 			    " bandwidth %d for display configuration(%d active planes).\n",
1162 			    data_rate, num_active_planes);
1163 		return -EINVAL;
1164 	}
1165 
1166 	if (num_psf_gv_points > 0 && psf_points == 0) {
1167 		drm_dbg_kms(display->drm, "No PSF GV points provide sufficient memory"
1168 			    " bandwidth %d for display configuration(%d active planes).\n",
1169 			    data_rate, num_active_planes);
1170 		return -EINVAL;
1171 	}
1172 
1173 	/*
1174 	 * Leave only single point with highest bandwidth, if
1175 	 * we can't enable SAGV due to the increased memory latency it may
1176 	 * cause.
1177 	 */
1178 	if (!intel_bw_can_enable_sagv(display, new_bw_state)) {
1179 		qgv_points = icl_max_bw_qgv_point_mask(display, num_active_planes);
1180 		drm_dbg_kms(display->drm, "No SAGV, using single QGV point mask 0x%x\n",
1181 			    qgv_points);
1182 	}
1183 
1184 	/*
1185 	 * We store the ones which need to be masked as that is what PCode
1186 	 * actually accepts as a parameter.
1187 	 */
1188 	new_bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display,
1189 								    qgv_points,
1190 								    psf_points);
1191 	/*
1192 	 * If the actual mask had changed we need to make sure that
1193 	 * the commits are serialized(in case this is a nomodeset, nonblocking)
1194 	 */
1195 	if (new_bw_state->qgv_points_mask != old_bw_state->qgv_points_mask) {
1196 		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
1197 		if (ret)
1198 			return ret;
1199 	}
1200 
1201 	return 0;
1202 }
1203 
/*
 * Dispatch the QGV point selection to the platform-specific helper:
 * PM-demand based peak bw on display 14+, pcode restriction mask otherwise.
 */
static int intel_bw_check_qgv_points(struct intel_display *display,
				     const struct intel_bw_state *old_bw_state,
				     struct intel_bw_state *new_bw_state)
{
	unsigned int num_active_planes =
			intel_bw_num_active_planes(display, new_bw_state);
	unsigned int data_rate =
		DIV_ROUND_UP(intel_bw_data_rate(display, new_bw_state), 1000);

	if (DISPLAY_VER(display) >= 14)
		return mtl_find_qgv_points(display, data_rate, num_active_planes,
					   new_bw_state);

	return icl_find_qgv_points(display, data_rate, num_active_planes,
				   old_bw_state, new_bw_state);
}
1221 
/*
 * Recompute the per-pipe data rate and active plane count for every crtc
 * in the commit. The bw state is only grabbed (and *changed set) when a
 * pipe's numbers actually differ from the old state.
 */
static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state, *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		unsigned int data_rate =
			intel_crtc_bw_data_rate(new_crtc_state);
		unsigned int active_planes =
			intel_crtc_bw_num_active_planes(new_crtc_state);
		struct intel_bw_state *bw_state;

		/*
		 * Avoid locking the bw state when
		 * nothing significant has changed.
		 */
		if (intel_crtc_bw_data_rate(old_crtc_state) == data_rate &&
		    intel_crtc_bw_num_active_planes(old_crtc_state) == active_planes)
			continue;

		bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(bw_state))
			return PTR_ERR(bw_state);

		bw_state->data_rate[crtc->pipe] = data_rate;
		bw_state->num_active_planes[crtc->pipe] = active_planes;

		*changed = true;

		drm_dbg_kms(display->drm,
			    "[CRTC:%d:%s] data rate %u num active planes %u\n",
			    crtc->base.base.id, crtc->base.name,
			    bw_state->data_rate[crtc->pipe],
			    bw_state->num_active_planes[crtc->pipe]);
	}

	return 0;
}
1267 
intel_bw_modeset_checks(struct intel_atomic_state * state)1268 static int intel_bw_modeset_checks(struct intel_atomic_state *state)
1269 {
1270 	const struct intel_bw_state *old_bw_state;
1271 	struct intel_bw_state *new_bw_state;
1272 	int ret;
1273 
1274 	if (!intel_any_crtc_active_changed(state))
1275 		return 0;
1276 
1277 	new_bw_state = intel_atomic_get_bw_state(state);
1278 	if (IS_ERR(new_bw_state))
1279 		return PTR_ERR(new_bw_state);
1280 
1281 	old_bw_state = intel_atomic_get_old_bw_state(state);
1282 
1283 	new_bw_state->active_pipes =
1284 		intel_calc_active_pipes(state, old_bw_state->active_pipes);
1285 
1286 	ret = intel_atomic_lock_global_state(&new_bw_state->base);
1287 	if (ret)
1288 		return ret;
1289 
1290 	return 0;
1291 }
1292 
/*
 * Update the per-pipe SAGV reject mask from the crtc states in the commit.
 *
 * The bw state is only grabbed once some pipe's SAGV verdict actually
 * changes. If the device-wide SAGV yes/no answer flips, the global state
 * is serialized against all other commits; if only the per-pipe mask
 * changed, a plain lock is sufficient.
 */
static int intel_bw_check_sagv_mask(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);
	const struct intel_crtc_state *old_crtc_state;
	const struct intel_crtc_state *new_crtc_state;
	const struct intel_bw_state *old_bw_state = NULL;
	struct intel_bw_state *new_bw_state = NULL;
	struct intel_crtc *crtc;
	int ret, i;

	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
					    new_crtc_state, i) {
		/* Only touch the global bw state when this pipe's verdict changed */
		if (intel_crtc_can_enable_sagv(old_crtc_state) ==
		    intel_crtc_can_enable_sagv(new_crtc_state))
			continue;

		new_bw_state = intel_atomic_get_bw_state(state);
		if (IS_ERR(new_bw_state))
			return PTR_ERR(new_bw_state);

		old_bw_state = intel_atomic_get_old_bw_state(state);

		if (intel_crtc_can_enable_sagv(new_crtc_state))
			new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
		else
			new_bw_state->pipe_sagv_reject |= BIT(crtc->pipe);
	}

	/* No pipe changed its SAGV verdict -> nothing to do */
	if (!new_bw_state)
		return 0;

	/* old_bw_state is non-NULL here: it was fetched alongside new_bw_state */
	if (intel_bw_can_enable_sagv(display, new_bw_state) !=
	    intel_bw_can_enable_sagv(display, old_bw_state)) {
		ret = intel_atomic_serialize_global_state(&new_bw_state->base);
		if (ret)
			return ret;
	} else if (new_bw_state->pipe_sagv_reject != old_bw_state->pipe_sagv_reject) {
		ret = intel_atomic_lock_global_state(&new_bw_state->base);
		if (ret)
			return ret;
	}

	return 0;
}
1337 
intel_bw_atomic_check(struct intel_atomic_state * state)1338 int intel_bw_atomic_check(struct intel_atomic_state *state)
1339 {
1340 	struct intel_display *display = to_intel_display(state);
1341 	bool changed = false;
1342 	struct intel_bw_state *new_bw_state;
1343 	const struct intel_bw_state *old_bw_state;
1344 	int ret;
1345 
1346 	if (DISPLAY_VER(display) < 9)
1347 		return 0;
1348 
1349 	ret = intel_bw_modeset_checks(state);
1350 	if (ret)
1351 		return ret;
1352 
1353 	ret = intel_bw_check_sagv_mask(state);
1354 	if (ret)
1355 		return ret;
1356 
1357 	/* FIXME earlier gens need some checks too */
1358 	if (DISPLAY_VER(display) < 11)
1359 		return 0;
1360 
1361 	ret = intel_bw_check_data_rate(state, &changed);
1362 	if (ret)
1363 		return ret;
1364 
1365 	old_bw_state = intel_atomic_get_old_bw_state(state);
1366 	new_bw_state = intel_atomic_get_new_bw_state(state);
1367 
1368 	if (new_bw_state &&
1369 	    intel_bw_can_enable_sagv(display, old_bw_state) !=
1370 	    intel_bw_can_enable_sagv(display, new_bw_state))
1371 		changed = true;
1372 
1373 	/*
1374 	 * If none of our inputs (data rates, number of active
1375 	 * planes, SAGV yes/no) changed then nothing to do here.
1376 	 */
1377 	if (!changed)
1378 		return 0;
1379 
1380 	ret = intel_bw_check_qgv_points(display, old_bw_state, new_bw_state);
1381 	if (ret)
1382 		return ret;
1383 
1384 	return 0;
1385 }
1386 
intel_bw_crtc_update(struct intel_bw_state * bw_state,const struct intel_crtc_state * crtc_state)1387 static void intel_bw_crtc_update(struct intel_bw_state *bw_state,
1388 				 const struct intel_crtc_state *crtc_state)
1389 {
1390 	struct intel_display *display = to_intel_display(crtc_state);
1391 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1392 
1393 	bw_state->data_rate[crtc->pipe] =
1394 		intel_crtc_bw_data_rate(crtc_state);
1395 	bw_state->num_active_planes[crtc->pipe] =
1396 		intel_crtc_bw_num_active_planes(crtc_state);
1397 
1398 	drm_dbg_kms(display->drm, "pipe %c data rate %u num active planes %u\n",
1399 		    pipe_name(crtc->pipe),
1400 		    bw_state->data_rate[crtc->pipe],
1401 		    bw_state->num_active_planes[crtc->pipe]);
1402 }
1403 
intel_bw_update_hw_state(struct intel_display * display)1404 void intel_bw_update_hw_state(struct intel_display *display)
1405 {
1406 	struct intel_bw_state *bw_state =
1407 		to_intel_bw_state(display->bw.obj.state);
1408 	struct intel_crtc *crtc;
1409 
1410 	if (DISPLAY_VER(display) < 9)
1411 		return;
1412 
1413 	bw_state->active_pipes = 0;
1414 	bw_state->pipe_sagv_reject = 0;
1415 
1416 	for_each_intel_crtc(display->drm, crtc) {
1417 		const struct intel_crtc_state *crtc_state =
1418 			to_intel_crtc_state(crtc->base.state);
1419 		enum pipe pipe = crtc->pipe;
1420 
1421 		if (crtc_state->hw.active)
1422 			bw_state->active_pipes |= BIT(pipe);
1423 
1424 		if (DISPLAY_VER(display) >= 11)
1425 			intel_bw_crtc_update(bw_state, crtc_state);
1426 
1427 		/* initially SAGV has been forced off */
1428 		bw_state->pipe_sagv_reject |= BIT(pipe);
1429 	}
1430 }
1431 
intel_bw_crtc_disable_noatomic(struct intel_crtc * crtc)1432 void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc)
1433 {
1434 	struct intel_display *display = to_intel_display(crtc);
1435 	struct intel_bw_state *bw_state =
1436 		to_intel_bw_state(display->bw.obj.state);
1437 	enum pipe pipe = crtc->pipe;
1438 
1439 	if (DISPLAY_VER(display) < 9)
1440 		return;
1441 
1442 	bw_state->data_rate[pipe] = 0;
1443 	bw_state->num_active_planes[pipe] = 0;
1444 }
1445 
1446 static struct intel_global_state *
intel_bw_duplicate_state(struct intel_global_obj * obj)1447 intel_bw_duplicate_state(struct intel_global_obj *obj)
1448 {
1449 	struct intel_bw_state *state;
1450 
1451 	state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
1452 	if (!state)
1453 		return NULL;
1454 
1455 	return &state->base;
1456 }
1457 
/* Free a bw state previously created by intel_bw_duplicate_state(). */
static void intel_bw_destroy_state(struct intel_global_obj *obj,
				   struct intel_global_state *state)
{
	kfree(state);
}
1463 
/* vfuncs for the bw global atomic state object */
static const struct intel_global_state_funcs intel_bw_funcs = {
	.atomic_duplicate_state = intel_bw_duplicate_state,
	.atomic_destroy_state = intel_bw_destroy_state,
};
1468 
/*
 * intel_bw_init - allocate and register the global bw state object
 * @display: display device
 *
 * Returns 0 on success, -ENOMEM if the initial state cannot be allocated.
 */
int intel_bw_init(struct intel_display *display)
{
	struct intel_bw_state *state;

	state = kzalloc_obj(*state);
	if (!state)
		return -ENOMEM;

	intel_atomic_global_obj_init(display, &display->bw.obj,
				     &state->base, &intel_bw_funcs);

	/*
	 * Limit this only if we have SAGV. And for Display version 14 onwards
	 * SAGV is handled through pmdemand requests
	 */
	if (intel_has_sagv(display) && IS_DISPLAY_VER(display, 11, 13))
		icl_force_disable_sagv(display, state);

	return 0;
}
1489 
intel_bw_pmdemand_needs_update(struct intel_atomic_state * state)1490 bool intel_bw_pmdemand_needs_update(struct intel_atomic_state *state)
1491 {
1492 	const struct intel_bw_state *new_bw_state, *old_bw_state;
1493 
1494 	new_bw_state = intel_atomic_get_new_bw_state(state);
1495 	old_bw_state = intel_atomic_get_old_bw_state(state);
1496 
1497 	if (new_bw_state &&
1498 	    new_bw_state->qgv_point_peakbw != old_bw_state->qgv_point_peakbw)
1499 		return true;
1500 
1501 	return false;
1502 }
1503 
intel_bw_can_enable_sagv(struct intel_display * display,const struct intel_bw_state * bw_state)1504 bool intel_bw_can_enable_sagv(struct intel_display *display,
1505 			      const struct intel_bw_state *bw_state)
1506 {
1507 	if (DISPLAY_VER(display) < 11 &&
1508 	    bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes))
1509 		return false;
1510 
1511 	return bw_state->pipe_sagv_reject == 0;
1512 }
1513 
/*
 * Return the selected QGV point's peak bandwidth, in multiples of
 * 100 MB/s, as stored for the PM demand request.
 */
int intel_bw_qgv_point_peakbw(const struct intel_bw_state *bw_state)
{
	return bw_state->qgv_point_peakbw;
}
1518