xref: /linux/drivers/gpu/drm/i915/display/intel_dmc.c (revision 815e260a18a3af4dab59025ee99a7156c0e8b5e0)
1 /*
2  * Copyright © 2014 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24 
25 #include <linux/debugfs.h>
26 #include <linux/firmware.h>
27 #include <drm/drm_vblank.h>
28 
29 #include <drm/drm_file.h>
30 #include <drm/drm_print.h>
31 
32 #include "i915_reg.h"
33 #include "intel_crtc.h"
34 #include "intel_de.h"
35 #include "intel_display_power_well.h"
36 #include "intel_display_regs.h"
37 #include "intel_display_rpm.h"
38 #include "intel_display_types.h"
39 #include "intel_display_utils.h"
40 #include "intel_dmc.h"
41 #include "intel_dmc_regs.h"
42 #include "intel_flipq.h"
43 #include "intel_step.h"
44 
45 /**
46  * DOC: DMC Firmware Support
47  *
48  * From gen9 onwards the display engine includes a DMC (Display
49  * Microcontroller) that saves and restores the display engine state when it
50  * enters a low-power state and comes back to normal.
51  */
52 
53 #define INTEL_DMC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git"
54 
55 enum intel_dmc_id {
56 	DMC_FW_MAIN = 0,
57 	DMC_FW_PIPEA,
58 	DMC_FW_PIPEB,
59 	DMC_FW_PIPEC,
60 	DMC_FW_PIPED,
61 	DMC_FW_MAX
62 };
63 
64 struct intel_dmc {
65 	struct intel_display *display;
66 	struct work_struct work;
67 	const char *fw_path;
68 	u32 max_fw_size; /* bytes */
69 	u32 version;
70 	struct {
71 		u32 dc5_start;
72 		u32 count;
73 	} dc6_allowed;
74 	struct dmc_fw_info {
75 		u32 mmio_count;
76 		i915_reg_t mmioaddr[20];
77 		u32 mmiodata[20];
78 		u32 dmc_offset;
79 		u32 start_mmioaddr;
80 		u32 dmc_fw_size; /* dwords */
81 		u32 *payload;
82 		bool present;
83 	} dmc_info[DMC_FW_MAX];
84 };
85 
86 /* Note: This may be NULL. */
87 static struct intel_dmc *display_to_dmc(struct intel_display *display)
88 {
89 	return display->dmc.dmc;
90 }
91 
92 static const char *dmc_firmware_param(struct intel_display *display)
93 {
94 	const char *p = display->params.dmc_firmware_path;
95 
96 	return p && *p ? p : NULL;
97 }
98 
99 static bool dmc_firmware_param_disabled(struct intel_display *display)
100 {
101 	const char *p = dmc_firmware_param(display);
102 
103 	/* The magic path "/dev/null" indicates DMC loading is disabled */
104 	return p && !strcmp(p, "/dev/null");
105 }
106 
107 #define DMC_VERSION(major, minor)	((major) << 16 | (minor))
108 #define DMC_VERSION_MAJOR(version)	((version) >> 16)
109 #define DMC_VERSION_MINOR(version)	((version) & 0xffff)
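/*
 * E.g. DMC_VERSION(2, 12) == 0x2000c, from which DMC_VERSION_MAJOR() and
 * DMC_VERSION_MINOR() recover 2 and 12 respectively.
 */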
110 
111 #define DMC_PATH(platform) \
112 	"i915/" __stringify(platform) "_dmc.bin"
113 
114 /*
115  * New DMC additions should not use this. This is used solely to remain
116  * compatible with systems that have not yet updated DMC blobs to use
117  * unversioned file names.
118  */
119 #define DMC_LEGACY_PATH(platform, major, minor) \
120 	"i915/"					\
121 	__stringify(platform) "_dmc_ver"	\
122 	__stringify(major) "_"			\
123 	__stringify(minor) ".bin"
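/*
 * E.g. DMC_PATH(tgl) expands to "i915/tgl_dmc.bin", while
 * DMC_LEGACY_PATH(tgl, 2, 12) expands to "i915/tgl_dmc_ver2_12.bin".
 */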
124 
125 #define XE2LPD_DMC_MAX_FW_SIZE		0x8000
126 #define XELPDP_DMC_MAX_FW_SIZE		0x7000
127 #define DISPLAY_VER13_DMC_MAX_FW_SIZE	0x20000
128 #define DISPLAY_VER12_DMC_MAX_FW_SIZE	ICL_DMC_MAX_FW_SIZE
129 
130 #define XE3LPD_3002_DMC_PATH		DMC_PATH(xe3lpd_3002)
131 MODULE_FIRMWARE(XE3LPD_3002_DMC_PATH);
132 
133 #define XE3LPD_DMC_PATH			DMC_PATH(xe3lpd)
134 MODULE_FIRMWARE(XE3LPD_DMC_PATH);
135 
136 #define XE2LPD_DMC_PATH			DMC_PATH(xe2lpd)
137 MODULE_FIRMWARE(XE2LPD_DMC_PATH);
138 
139 #define BMG_DMC_PATH			DMC_PATH(bmg)
140 MODULE_FIRMWARE(BMG_DMC_PATH);
141 
142 #define MTL_DMC_PATH			DMC_PATH(mtl)
143 MODULE_FIRMWARE(MTL_DMC_PATH);
144 
145 #define DG2_DMC_PATH			DMC_LEGACY_PATH(dg2, 2, 08)
146 MODULE_FIRMWARE(DG2_DMC_PATH);
147 
148 #define ADLP_DMC_PATH			DMC_PATH(adlp)
149 #define ADLP_DMC_FALLBACK_PATH		DMC_LEGACY_PATH(adlp, 2, 16)
150 MODULE_FIRMWARE(ADLP_DMC_PATH);
151 MODULE_FIRMWARE(ADLP_DMC_FALLBACK_PATH);
152 
153 #define ADLS_DMC_PATH			DMC_LEGACY_PATH(adls, 2, 01)
154 MODULE_FIRMWARE(ADLS_DMC_PATH);
155 
156 #define DG1_DMC_PATH			DMC_LEGACY_PATH(dg1, 2, 02)
157 MODULE_FIRMWARE(DG1_DMC_PATH);
158 
159 #define RKL_DMC_PATH			DMC_LEGACY_PATH(rkl, 2, 03)
160 MODULE_FIRMWARE(RKL_DMC_PATH);
161 
162 #define TGL_DMC_PATH			DMC_LEGACY_PATH(tgl, 2, 12)
163 MODULE_FIRMWARE(TGL_DMC_PATH);
164 
165 #define ICL_DMC_PATH			DMC_LEGACY_PATH(icl, 1, 09)
166 #define ICL_DMC_MAX_FW_SIZE		0x6000
167 MODULE_FIRMWARE(ICL_DMC_PATH);
168 
169 #define GLK_DMC_PATH			DMC_LEGACY_PATH(glk, 1, 04)
170 #define GLK_DMC_MAX_FW_SIZE		0x4000
171 MODULE_FIRMWARE(GLK_DMC_PATH);
172 
173 #define KBL_DMC_PATH			DMC_LEGACY_PATH(kbl, 1, 04)
174 #define KBL_DMC_MAX_FW_SIZE		BXT_DMC_MAX_FW_SIZE
175 MODULE_FIRMWARE(KBL_DMC_PATH);
176 
177 #define SKL_DMC_PATH			DMC_LEGACY_PATH(skl, 1, 27)
178 #define SKL_DMC_MAX_FW_SIZE		BXT_DMC_MAX_FW_SIZE
179 MODULE_FIRMWARE(SKL_DMC_PATH);
180 
181 #define BXT_DMC_PATH			DMC_LEGACY_PATH(bxt, 1, 07)
182 #define BXT_DMC_MAX_FW_SIZE		0x3000
183 MODULE_FIRMWARE(BXT_DMC_PATH);
184 
185 static const char *dmc_firmware_default(struct intel_display *display, u32 *size)
186 {
187 	const char *fw_path = NULL;
188 	u32 max_fw_size = 0;
189 	if (DISPLAY_VERx100(display) == 3002) {
190 		fw_path = XE3LPD_3002_DMC_PATH;
191 		max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
192 	} else if (DISPLAY_VERx100(display) == 3000) {
193 		fw_path = XE3LPD_DMC_PATH;
194 		max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
195 	} else if (DISPLAY_VERx100(display) == 2000) {
196 		fw_path = XE2LPD_DMC_PATH;
197 		max_fw_size = XE2LPD_DMC_MAX_FW_SIZE;
198 	} else if (DISPLAY_VERx100(display) == 1401) {
199 		fw_path = BMG_DMC_PATH;
200 		max_fw_size = XELPDP_DMC_MAX_FW_SIZE;
201 	} else if (DISPLAY_VERx100(display) == 1400) {
202 		fw_path = MTL_DMC_PATH;
203 		max_fw_size = XELPDP_DMC_MAX_FW_SIZE;
204 	} else if (display->platform.dg2) {
205 		fw_path = DG2_DMC_PATH;
206 		max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
207 	} else if (display->platform.alderlake_p) {
208 		fw_path = ADLP_DMC_PATH;
209 		max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
210 	} else if (display->platform.alderlake_s) {
211 		fw_path = ADLS_DMC_PATH;
212 		max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
213 	} else if (display->platform.dg1) {
214 		fw_path = DG1_DMC_PATH;
215 		max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
216 	} else if (display->platform.rocketlake) {
217 		fw_path = RKL_DMC_PATH;
218 		max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
219 	} else if (display->platform.tigerlake) {
220 		fw_path = TGL_DMC_PATH;
221 		max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE;
222 	} else if (DISPLAY_VER(display) == 11) {
223 		fw_path = ICL_DMC_PATH;
224 		max_fw_size = ICL_DMC_MAX_FW_SIZE;
225 	} else if (display->platform.geminilake) {
226 		fw_path = GLK_DMC_PATH;
227 		max_fw_size = GLK_DMC_MAX_FW_SIZE;
228 	} else if (display->platform.kabylake ||
229 		   display->platform.coffeelake ||
230 		   display->platform.cometlake) {
231 		fw_path = KBL_DMC_PATH;
232 		max_fw_size = KBL_DMC_MAX_FW_SIZE;
233 	} else if (display->platform.skylake) {
234 		fw_path = SKL_DMC_PATH;
235 		max_fw_size = SKL_DMC_MAX_FW_SIZE;
236 	} else if (display->platform.broxton) {
237 		fw_path = BXT_DMC_PATH;
238 		max_fw_size = BXT_DMC_MAX_FW_SIZE;
239 	}
240 
241 	*size = max_fw_size;
242 
243 	return fw_path;
244 }
245 
246 #define DMC_DEFAULT_FW_OFFSET		0xFFFFFFFF
247 #define PACKAGE_MAX_FW_INFO_ENTRIES	20
248 #define PACKAGE_V2_MAX_FW_INFO_ENTRIES	32
249 #define DMC_V1_MAX_MMIO_COUNT		8
250 #define DMC_V3_MAX_MMIO_COUNT		20
251 #define DMC_V1_MMIO_START_RANGE		0x80000
252 
253 #define PIPE_TO_DMC_ID(pipe)		 (DMC_FW_PIPEA + ((pipe) - PIPE_A))
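/* E.g. PIPE_TO_DMC_ID(PIPE_C) == DMC_FW_PIPEC */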
254 
255 struct intel_css_header {
256 	/* 0x09 for DMC */
257 	u32 module_type;
258 
259 	/* Includes the DMC specific header in dwords */
260 	u32 header_len;
261 
262 	/* always 0x10000 */
263 	u32 header_ver;
264 
265 	/* Not used */
266 	u32 module_id;
267 
268 	/* Not used */
269 	u32 module_vendor;
270 
271 	/* in YYYYMMDD format */
272 	u32 date;
273 
274 	/* Size in dwords: (CSS header len + package header len + DMC FWs len) / 4 */
275 	u32 size;
276 
277 	/* Not used */
278 	u32 key_size;
279 
280 	/* Not used */
281 	u32 modulus_size;
282 
283 	/* Not used */
284 	u32 exponent_size;
285 
286 	/* Not used */
287 	u32 reserved1[12];
288 
289 	/* Major Minor */
290 	u32 version;
291 
292 	/* Not used */
293 	u32 reserved2[8];
294 
295 	/* Not used */
296 	u32 kernel_header_info;
297 } __packed;
298 
299 struct intel_fw_info {
300 	u8 reserved1;
301 
302 	/* reserved on package_header version 1, must be 0 on version 2 */
303 	u8 dmc_id;
304 
305 	/* Stepping (A, B, C, ..., *). * is a wildcard */
306 	char stepping;
307 
308 	/* Sub-stepping (0, 1, ..., *). * is a wildcard */
309 	char substepping;
310 
311 	u32 offset;
312 	u32 reserved2;
313 } __packed;
314 
315 struct intel_package_header {
316 	/* DMC container header length in dwords */
317 	u8 header_len;
318 
319 	/* 0x01, 0x02 */
320 	u8 header_ver;
321 
322 	u8 reserved[10];
323 
324 	/* Number of valid entries in the FWInfo array below */
325 	u32 num_entries;
326 } __packed;
327 
328 struct intel_dmc_header_base {
329 	/* always 0x40403E3E */
330 	u32 signature;
331 
332 	/* DMC binary header length */
333 	u8 header_len;
334 
335 	/* 0x01 */
336 	u8 header_ver;
337 
338 	/* Reserved */
339 	u16 dmcc_ver;
340 
341 	/* Major, Minor */
342 	u32 project;
343 
344 	/* Firmware program size (excluding header) in dwords */
345 	u32 fw_size;
346 
347 	/* Major Minor version */
348 	u32 fw_version;
349 } __packed;
350 
351 struct intel_dmc_header_v1 {
352 	struct intel_dmc_header_base base;
353 
354 	/* Number of valid MMIO cycles present. */
355 	u32 mmio_count;
356 
357 	/* MMIO address */
358 	u32 mmioaddr[DMC_V1_MAX_MMIO_COUNT];
359 
360 	/* MMIO data */
361 	u32 mmiodata[DMC_V1_MAX_MMIO_COUNT];
362 
363 	/* FW filename  */
364 	char dfile[32];
365 
366 	u32 reserved1[2];
367 } __packed;
368 
369 struct intel_dmc_header_v3 {
370 	struct intel_dmc_header_base base;
371 
372 	/* DMC RAM start MMIO address */
373 	u32 start_mmioaddr;
374 
375 	u32 reserved[9];
376 
377 	/* FW filename */
378 	char dfile[32];
379 
380 	/* Number of valid MMIO cycles present. */
381 	u32 mmio_count;
382 
383 	/* MMIO address */
384 	u32 mmioaddr[DMC_V3_MAX_MMIO_COUNT];
385 
386 	/* MMIO data */
387 	u32 mmiodata[DMC_V3_MAX_MMIO_COUNT];
388 } __packed;
389 
390 struct stepping_info {
391 	char stepping;
392 	char substepping;
393 };
394 
395 #define for_each_dmc_id(__dmc_id) \
396 	for ((__dmc_id) = DMC_FW_MAIN; (__dmc_id) < DMC_FW_MAX; (__dmc_id)++)
397 
398 static bool is_valid_dmc_id(enum intel_dmc_id dmc_id)
399 {
400 	return dmc_id >= DMC_FW_MAIN && dmc_id < DMC_FW_MAX;
401 }
402 
403 static bool has_dmc_id_fw(struct intel_display *display, enum intel_dmc_id dmc_id)
404 {
405 	struct intel_dmc *dmc = display_to_dmc(display);
406 
407 	return dmc && dmc->dmc_info[dmc_id].payload;
408 }
409 
410 bool intel_dmc_has_payload(struct intel_display *display)
411 {
412 	return has_dmc_id_fw(display, DMC_FW_MAIN);
413 }
414 
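/*
 * intel_step_name() returns a two-character name such as "B0"; split it into
 * the stepping ('B') and substepping ('0') characters used for firmware
 * matching.
 */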
415 static const struct stepping_info *
416 intel_get_stepping_info(struct intel_display *display,
417 			struct stepping_info *si)
418 {
419 	const char *step_name = intel_step_name(INTEL_DISPLAY_STEP(display));
420 
421 	si->stepping = step_name[0];
422 	si->substepping = step_name[1];
423 	return si;
424 }
425 
426 static void gen9_set_dc_state_debugmask(struct intel_display *display)
427 {
428 	/* The below bits never need to be cleared afterwards */
429 	intel_de_rmw(display, DC_STATE_DEBUG, 0,
430 		     DC_STATE_DEBUG_MASK_CORES | DC_STATE_DEBUG_MASK_MEMORY_UP);
431 	intel_de_posting_read(display, DC_STATE_DEBUG);
432 }
433 
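/*
 * Neutralize a single event handler: point it at the never-triggering
 * DMC_EVENT_FALSE event and clear the corresponding DMC_EVT_HTP register.
 */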
434 static void disable_event_handler(struct intel_display *display,
435 				  i915_reg_t ctl_reg, i915_reg_t htp_reg)
436 {
437 	intel_de_write(display, ctl_reg,
438 		       REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
439 				      DMC_EVT_CTL_TYPE_EDGE_0_1) |
440 		       REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
441 				      DMC_EVENT_FALSE));
442 	intel_de_write(display, htp_reg, 0);
443 }
444 
445 static void disable_all_event_handlers(struct intel_display *display,
446 				       enum intel_dmc_id dmc_id)
447 {
448 	int handler;
449 
450 	/* TODO: disable the event handlers on pre-GEN12 platforms as well */
451 	if (DISPLAY_VER(display) < 12)
452 		return;
453 
454 	if (!has_dmc_id_fw(display, dmc_id))
455 		return;
456 
457 	for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++)
458 		disable_event_handler(display,
459 				      DMC_EVT_CTL(display, dmc_id, handler),
460 				      DMC_EVT_HTP(display, dmc_id, handler));
461 }
462 
463 static void adlp_pipedmc_clock_gating_wa(struct intel_display *display, bool enable)
464 {
465 	enum pipe pipe;
466 
467 	/*
468 	 * Wa_16015201720:adl-p,dg2
469 	 * The WA requires clock gating to be disabled all the time
470 	 * for pipe A and B.
471 	 * For pipe C and D clock gating needs to be disabled only
472 	 * during initializing the firmware.
473 	 */
474 	if (enable)
475 		for (pipe = PIPE_A; pipe <= PIPE_D; pipe++)
476 			intel_de_rmw(display, CLKGATE_DIS_PSL_EXT(pipe),
477 				     0, PIPEDMC_GATING_DIS);
478 	else
479 		for (pipe = PIPE_C; pipe <= PIPE_D; pipe++)
480 			intel_de_rmw(display, CLKGATE_DIS_PSL_EXT(pipe),
481 				     PIPEDMC_GATING_DIS, 0);
482 }
483 
484 static void mtl_pipedmc_clock_gating_wa(struct intel_display *display)
485 {
486 	/*
487 	 * Wa_16015201720
488 	 * The WA requires clock gating to be disabled all the time
489 	 * for pipe A and B.
490 	 */
491 	intel_de_rmw(display, GEN9_CLKGATE_DIS_0, 0,
492 		     MTL_PIPEDMC_GATING_DIS(PIPE_A) |
493 		     MTL_PIPEDMC_GATING_DIS(PIPE_B));
494 }
495 
496 static void pipedmc_clock_gating_wa(struct intel_display *display, bool enable)
497 {
498 	if (display->platform.meteorlake && enable)
499 		mtl_pipedmc_clock_gating_wa(display);
500 	else if (DISPLAY_VER(display) == 13)
501 		adlp_pipedmc_clock_gating_wa(display, enable);
502 }
503 
504 static u32 pipedmc_interrupt_mask(struct intel_display *display)
505 {
506 	/*
507 	 * FIXME PIPEDMC_ERROR not enabled for now due to LNL pipe B
508 	 * triggering it during the first DC state transition. Figure
509 	 * out what is going on...
510 	 */
511 	return PIPEDMC_FLIPQ_PROG_DONE |
512 		PIPEDMC_GTT_FAULT |
513 		PIPEDMC_ATS_FAULT;
514 }
515 
516 static u32 dmc_evt_ctl_disable(u32 dmc_evt_ctl)
517 {
518 	/*
519 	 * DMC_EVT_CTL_ENABLE cannot be cleared once set. Always
520 	 * configure it based on the original event definition to
521 	 * avoid mismatches in assert_dmc_loaded().
522 	 */
523 	return (dmc_evt_ctl & DMC_EVT_CTL_ENABLE) |
524 		REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
525 			       DMC_EVT_CTL_TYPE_EDGE_0_1) |
526 		REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
527 			       DMC_EVENT_FALSE);
528 }
529 
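/*
 * The range checks below rely on the per-handler DMC_EVT_CTL/DMC_EVT_HTP
 * registers of each DMC instance being laid out as contiguous arrays, indexed
 * from handler 0 up to DMC_EVENT_HANDLER_COUNT_GEN12.
 */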
530 static bool is_dmc_evt_ctl_reg(struct intel_display *display,
531 			       enum intel_dmc_id dmc_id, i915_reg_t reg)
532 {
533 	u32 offset = i915_mmio_reg_offset(reg);
534 	u32 start = i915_mmio_reg_offset(DMC_EVT_CTL(display, dmc_id, 0));
535 	u32 end = i915_mmio_reg_offset(DMC_EVT_CTL(display, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12));
536 
537 	return offset >= start && offset < end;
538 }
539 
540 static bool is_dmc_evt_htp_reg(struct intel_display *display,
541 			       enum intel_dmc_id dmc_id, i915_reg_t reg)
542 {
543 	u32 offset = i915_mmio_reg_offset(reg);
544 	u32 start = i915_mmio_reg_offset(DMC_EVT_HTP(display, dmc_id, 0));
545 	u32 end = i915_mmio_reg_offset(DMC_EVT_HTP(display, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12));
546 
547 	return offset >= start && offset < end;
548 }
549 
550 static bool is_event_handler(struct intel_display *display,
551 			     enum intel_dmc_id dmc_id,
552 			     unsigned int event_id,
553 			     i915_reg_t reg, u32 data)
554 {
555 	return is_dmc_evt_ctl_reg(display, dmc_id, reg) &&
556 		REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == event_id;
557 }
558 
559 static bool fixup_dmc_evt(struct intel_display *display,
560 			  enum intel_dmc_id dmc_id,
561 			  i915_reg_t reg_ctl, u32 *data_ctl,
562 			  i915_reg_t reg_htp, u32 *data_htp)
563 {
564 	if (!is_dmc_evt_ctl_reg(display, dmc_id, reg_ctl))
565 		return false;
566 
567 	if (!is_dmc_evt_htp_reg(display, dmc_id, reg_htp))
568 		return false;
569 
570 	/* make sure reg_ctl and reg_htp are for the same event */
571 	if (i915_mmio_reg_offset(reg_ctl) - i915_mmio_reg_offset(DMC_EVT_CTL(display, dmc_id, 0)) !=
572 	    i915_mmio_reg_offset(reg_htp) - i915_mmio_reg_offset(DMC_EVT_HTP(display, dmc_id, 0)))
573 		return false;
574 
575 	/*
576 	 * On ADL-S the HRR event handler is not restored after DC6.
577 	 * Clear it to zero from the beginning to avoid mismatches later.
578 	 */
579 	if (display->platform.alderlake_s && dmc_id == DMC_FW_MAIN &&
580 	    is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg_ctl, *data_ctl)) {
581 		*data_ctl = 0;
582 		*data_htp = 0;
583 		return true;
584 	}
585 
586 	/*
587 	 * TGL/ADL-S DMC firmware incorrectly uses the undelayed vblank
588 	 * event for the HRR handler, when it should be using the delayed
589 	 * vblank event instead. Fixed firmware was never released
590 	 * so the Windows driver just hacks around it by overriding
591 	 * the event ID. Do the same.
592 	 */
593 	if ((display->platform.tigerlake || display->platform.alderlake_s) &&
594 	    is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg_ctl, *data_ctl)) {
595 		*data_ctl &= ~DMC_EVT_CTL_EVENT_ID_MASK;
596 		*data_ctl |=  REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
597 					     MAINDMC_EVENT_VBLANK_DELAYED_A);
598 		return true;
599 	}
600 
601 	return false;
602 }
603 
604 static bool disable_dmc_evt(struct intel_display *display,
605 			    enum intel_dmc_id dmc_id,
606 			    i915_reg_t reg, u32 data)
607 {
608 	if (!is_dmc_evt_ctl_reg(display, dmc_id, reg))
609 		return false;
610 
611 	/* keep all pipe DMC events disabled by default */
612 	if (dmc_id != DMC_FW_MAIN)
613 		return true;
614 
615 	/* also disable the flip queue event on the main DMC on TGL */
616 	if (display->platform.tigerlake &&
617 	    is_event_handler(display, dmc_id, MAINDMC_EVENT_CLK_MSEC, reg, data))
618 		return true;
619 
620 	/* also disable the HRR event on the main DMC on TGL/ADLS */
621 	if ((display->platform.tigerlake || display->platform.alderlake_s) &&
622 	    is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_DELAYED_A, reg, data))
623 		return true;
624 
625 	return false;
626 }
627 
628 static u32 dmc_mmiodata(struct intel_display *display,
629 			struct intel_dmc *dmc,
630 			enum intel_dmc_id dmc_id, int i)
631 {
632 	if (disable_dmc_evt(display, dmc_id,
633 			    dmc->dmc_info[dmc_id].mmioaddr[i],
634 			    dmc->dmc_info[dmc_id].mmiodata[i]))
635 		return dmc_evt_ctl_disable(dmc->dmc_info[dmc_id].mmiodata[i]);
636 	else
637 		return dmc->dmc_info[dmc_id].mmiodata[i];
638 }
639 
640 static void dmc_load_mmio(struct intel_display *display, enum intel_dmc_id dmc_id)
641 {
642 	struct intel_dmc *dmc = display_to_dmc(display);
643 	int i;
644 
645 	for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
646 		intel_de_write(display, dmc->dmc_info[dmc_id].mmioaddr[i],
647 			       dmc_mmiodata(display, dmc, dmc_id, i));
648 	}
649 }
650 
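/*
 * Copy the firmware payload into the DMC program RAM via DMC_PROGRAM() and
 * then apply the firmware's MMIO write list. Preemption is disabled around
 * the payload copy, presumably to keep the write sequence tight.
 */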
651 static void dmc_load_program(struct intel_display *display, enum intel_dmc_id dmc_id)
652 {
653 	struct intel_dmc *dmc = display_to_dmc(display);
654 	int i;
655 
656 	disable_all_event_handlers(display, dmc_id);
657 
658 	preempt_disable();
659 
660 	for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) {
661 		intel_de_write_fw(display,
662 				  DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, i),
663 				  dmc->dmc_info[dmc_id].payload[i]);
664 	}
665 
666 	preempt_enable();
667 
668 	dmc_load_mmio(display, dmc_id);
669 }
670 
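/*
 * Sanity check that the start of the program payload and the MMIO writes
 * performed by dmc_load_program() actually stuck.
 */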
671 static void assert_dmc_loaded(struct intel_display *display,
672 			      enum intel_dmc_id dmc_id)
673 {
674 	struct intel_dmc *dmc = display_to_dmc(display);
675 	u32 expected, found;
676 	int i;
677 
678 	if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id))
679 		return;
680 
681 	found = intel_de_read(display, DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, 0));
682 	expected = dmc->dmc_info[dmc_id].payload[0];
683 
684 	drm_WARN(display->drm, found != expected,
685 		 "DMC %d program storage start incorrect (expected 0x%x, current 0x%x)\n",
686 		 dmc_id, expected, found);
687 
688 	for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
689 		i915_reg_t reg = dmc->dmc_info[dmc_id].mmioaddr[i];
690 
691 		found = intel_de_read(display, reg);
692 		expected = dmc_mmiodata(display, dmc, dmc_id, i);
693 
694 		drm_WARN(display->drm, found != expected,
695 			 "DMC %d mmio[%d]/0x%x incorrect (expected 0x%x, current 0x%x)\n",
696 			 dmc_id, i, i915_mmio_reg_offset(reg), expected, found);
697 	}
698 }
699 
700 void assert_main_dmc_loaded(struct intel_display *display)
701 {
702 	assert_dmc_loaded(display, DMC_FW_MAIN);
703 }
704 
705 static bool need_pipedmc_load_program(struct intel_display *display)
706 {
707 	/* On TGL/derivatives pipe DMC state is lost when PG1 is disabled */
708 	return DISPLAY_VER(display) == 12;
709 }
710 
711 static bool need_pipedmc_load_mmio(struct intel_display *display, enum pipe pipe)
712 {
713 	/*
714 	 * PTL:
715 	 * - pipe A/B DMC doesn't need save/restore
716 	 * - pipe C/D DMC is in PG0, needs manual save/restore
717 	 */
718 	if (DISPLAY_VER(display) == 30)
719 		return pipe >= PIPE_C;
720 
721 	/*
722 	 * FIXME LNL unclear, main DMC firmware has the pipe DMC A/B PG0
723 	 * save/restore, but so far unable to see the loss of pipe DMC state
724 	 * in action. Are we just failing to turn off PG0 due to some other
725 	 * SoC level stuff?
726 	 */
727 	if (DISPLAY_VER(display) == 20)
728 		return false;
729 
730 	/*
731 	 * FIXME BMG untested, main DMC firmware has the
732 	 * pipe DMC A/B PG0 save/restore...
733 	 */
734 	if (display->platform.battlemage)
735 		return false;
736 
737 	/*
738 	 * DG2:
739 	 * - Pipe DMCs presumably in PG0?
740 	 * - No DC6, and even DC9 doesn't seem to result
741 	 *   in loss of DMC state for whatever reason
742 	 */
743 	if (display->platform.dg2)
744 		return false;
745 
746 	/*
747 	 * ADL/MTL:
748 	 * - pipe A/B DMC is in PG0, saved/restored by the main DMC
749 	 * - pipe C/D DMC is in PG0, needs manual save/restore
750 	 */
751 	if (IS_DISPLAY_VER(display, 13, 14))
752 		return pipe >= PIPE_C;
753 
754 	return false;
755 }
756 
757 static bool can_enable_pipedmc(const struct intel_crtc_state *crtc_state)
758 {
759 	struct intel_display *display = to_intel_display(crtc_state);
760 
761 	/*
762 	 * On TGL/derivatives pipe DMC state is lost when PG1 is disabled.
763 	 * Do not even enable the pipe DMC when that can happen outside
764 	 * of driver control (PSR+DC5/6).
765 	 */
766 	if (DISPLAY_VER(display) == 12 && crtc_state->has_psr)
767 		return false;
768 
769 	return true;
770 }
771 
772 void intel_dmc_enable_pipe(const struct intel_crtc_state *crtc_state)
773 {
774 	struct intel_display *display = to_intel_display(crtc_state);
775 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
776 	enum pipe pipe = crtc->pipe;
777 	enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
778 
779 	if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id))
780 		return;
781 
782 	if (!can_enable_pipedmc(crtc_state)) {
783 		intel_dmc_disable_pipe(crtc_state);
784 		return;
785 	}
786 
787 	if (need_pipedmc_load_program(display))
788 		dmc_load_program(display, dmc_id);
789 	else if (need_pipedmc_load_mmio(display, pipe))
790 		dmc_load_mmio(display, dmc_id);
791 
792 	assert_dmc_loaded(display, dmc_id);
793 
794 	if (DISPLAY_VER(display) >= 20) {
795 		intel_flipq_reset(display, pipe);
796 
797 		intel_de_write(display, PIPEDMC_INTERRUPT(pipe), pipedmc_interrupt_mask(display));
798 		intel_de_write(display, PIPEDMC_INTERRUPT_MASK(pipe), ~pipedmc_interrupt_mask(display));
799 	}
800 
801 	if (DISPLAY_VER(display) >= 14)
802 		intel_de_rmw(display, MTL_PIPEDMC_CONTROL, 0, PIPEDMC_ENABLE_MTL(pipe));
803 	else
804 		intel_de_rmw(display, PIPEDMC_CONTROL(pipe), 0, PIPEDMC_ENABLE);
805 }
806 
807 void intel_dmc_disable_pipe(const struct intel_crtc_state *crtc_state)
808 {
809 	struct intel_display *display = to_intel_display(crtc_state);
810 	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
811 	enum pipe pipe = crtc->pipe;
812 	enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
813 
814 	if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id))
815 		return;
816 
817 	if (DISPLAY_VER(display) >= 14)
818 		intel_de_rmw(display, MTL_PIPEDMC_CONTROL, PIPEDMC_ENABLE_MTL(pipe), 0);
819 	else
820 		intel_de_rmw(display, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0);
821 
822 	if (DISPLAY_VER(display) >= 20) {
823 		intel_de_write(display, PIPEDMC_INTERRUPT_MASK(pipe), ~0);
824 		intel_de_write(display, PIPEDMC_INTERRUPT(pipe), pipedmc_interrupt_mask(display));
825 
826 		intel_flipq_reset(display, pipe);
827 	}
828 }
829 
830 static void dmc_configure_event(struct intel_display *display,
831 				enum intel_dmc_id dmc_id,
832 				unsigned int event_id,
833 				bool enable)
834 {
835 	struct intel_dmc *dmc = display_to_dmc(display);
836 	int num_handlers = 0;
837 	int i;
838 
839 	for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
840 		i915_reg_t reg = dmc->dmc_info[dmc_id].mmioaddr[i];
841 		u32 data = dmc->dmc_info[dmc_id].mmiodata[i];
842 
843 		if (!is_event_handler(display, dmc_id, event_id, reg, data))
844 			continue;
845 
846 		intel_de_write(display, reg, enable ? data : dmc_evt_ctl_disable(data));
847 		num_handlers++;
848 	}
849 
850 	drm_WARN_ONCE(display->drm, num_handlers != 1,
851 		      "DMC %d has %d handlers for event 0x%x\n",
852 		      dmc_id, num_handlers, event_id);
853 }
854 
855 /**
856  * intel_dmc_block_pkgc() - block PKG C-state
857  * @display: display instance
858  * @pipe: pipe whose register is used for blocking
859  * @block: block/unblock
860  *
861  * This interface is targeted at Wa_16025596647 usage, i.e. setting/clearing the
862  * PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS bit in the PIPEDMC_BLOCK_PKGC_SW register.
863  */
864 void intel_dmc_block_pkgc(struct intel_display *display, enum pipe pipe,
865 			  bool block)
866 {
867 	intel_de_rmw(display, PIPEDMC_BLOCK_PKGC_SW(pipe),
868 		     PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS, block ?
869 		     PIPEDMC_BLOCK_PKGC_SW_BLOCK_PKGC_ALWAYS : 0);
870 }
871 
872 /**
873  * intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank() - start of PKG
874  * C-state exit
875  * @display: display instance
876  * @pipe: pipe whose register is used for blocking
877  * @enable: enable/disable
878  *
879  * This interface is targeted at Wa_16025596647 usage, i.e. starting the package C
880  * state exit at the start of the undelayed vblank.
881  */
882 void intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(struct intel_display *display,
883 							    enum pipe pipe, bool enable)
884 {
885 	enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe);
886 
887 	dmc_configure_event(display, dmc_id, PIPEDMC_EVENT_VBLANK, enable);
888 }
889 
890 /**
891  * intel_dmc_load_program() - write the firmware from memory to register.
892  * @display: display instance
893  *
894  * DMC firmware is read from a .bin file and kept in internal memory one time.
895  * Every time the display comes back from a low-power state this function is
896  * called to copy the firmware from internal memory to registers.
897  */
898 void intel_dmc_load_program(struct intel_display *display)
899 {
900 	struct i915_power_domains *power_domains = &display->power.domains;
901 	enum intel_dmc_id dmc_id;
902 
903 	if (!intel_dmc_has_payload(display))
904 		return;
905 
906 	assert_display_rpm_held(display);
907 
908 	pipedmc_clock_gating_wa(display, true);
909 
910 	for_each_dmc_id(dmc_id) {
911 		dmc_load_program(display, dmc_id);
912 		assert_dmc_loaded(display, dmc_id);
913 	}
914 
915 	if (DISPLAY_VER(display) >= 20)
916 		intel_de_write(display, DMC_FQ_W2_PTS_CFG_SEL,
917 			       PIPE_D_DMC_W2_PTS_CONFIG_SELECT(PIPE_D) |
918 			       PIPE_C_DMC_W2_PTS_CONFIG_SELECT(PIPE_C) |
919 			       PIPE_B_DMC_W2_PTS_CONFIG_SELECT(PIPE_B) |
920 			       PIPE_A_DMC_W2_PTS_CONFIG_SELECT(PIPE_A));
921 
922 	power_domains->dc_state = 0;
923 
924 	gen9_set_dc_state_debugmask(display);
925 
926 	pipedmc_clock_gating_wa(display, false);
927 }
928 
929 /**
930  * intel_dmc_disable_program() - disable the firmware
931  * @display: display instance
932  *
933  * Disable all event handlers in the firmware, making sure the firmware is
934  * inactive after the display is uninitialized.
935  */
936 void intel_dmc_disable_program(struct intel_display *display)
937 {
938 	enum intel_dmc_id dmc_id;
939 
940 	if (!intel_dmc_has_payload(display))
941 		return;
942 
943 	pipedmc_clock_gating_wa(display, true);
944 
945 	for_each_dmc_id(dmc_id)
946 		disable_all_event_handlers(display, dmc_id);
947 
948 	pipedmc_clock_gating_wa(display, false);
949 }
950 
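/*
 * E.g. a firmware entry with stepping 'B' and substepping '*' matches any
 * B-step hardware, while an entry with ('*', '*') matches everything.
 */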
951 static bool fw_info_matches_stepping(const struct intel_fw_info *fw_info,
952 				     const struct stepping_info *si)
953 {
954 	if ((fw_info->substepping == '*' && si->stepping == fw_info->stepping) ||
955 	    (si->stepping == fw_info->stepping && si->substepping == fw_info->substepping) ||
956 	    /*
957 	     * If we don't find a more specific one from above two checks, we
958 	     * then check for the generic one to be sure to work even with
959 	     * "broken firmware"
960 	     */
961 	    (si->stepping == '*' && si->substepping == fw_info->substepping) ||
962 	    (fw_info->stepping == '*' && fw_info->substepping == '*'))
963 		return true;
964 
965 	return false;
966 }
967 
968 /*
969  * Search fw_info table for dmc_offset to find firmware binary: num_entries is
970  * already sanitized.
971  */
972 static void dmc_set_fw_offset(struct intel_dmc *dmc,
973 			      const struct intel_fw_info *fw_info,
974 			      unsigned int num_entries,
975 			      const struct stepping_info *si,
976 			      u8 package_ver)
977 {
978 	struct intel_display *display = dmc->display;
979 	enum intel_dmc_id dmc_id;
980 	unsigned int i;
981 
982 	for (i = 0; i < num_entries; i++) {
983 		dmc_id = package_ver <= 1 ? DMC_FW_MAIN : fw_info[i].dmc_id;
984 
985 		if (!is_valid_dmc_id(dmc_id)) {
986 			drm_dbg(display->drm, "Unsupported firmware id: %u\n", dmc_id);
987 			continue;
988 		}
989 
990 		/* More specific versions come first, so we don't even have to
991 		 * check for the stepping since we already found a previous FW
992 		 * for this id.
993 		 */
994 		if (dmc->dmc_info[dmc_id].present)
995 			continue;
996 
997 		if (fw_info_matches_stepping(&fw_info[i], si)) {
998 			dmc->dmc_info[dmc_id].present = true;
999 			dmc->dmc_info[dmc_id].dmc_offset = fw_info[i].offset;
1000 		}
1001 	}
1002 }
1003 
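/*
 * Reject firmware whose MMIO write list touches addresses outside the range
 * expected for this DMC instance on this platform.
 */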
1004 static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
1005 				       const u32 *mmioaddr, u32 mmio_count,
1006 				       int header_ver, enum intel_dmc_id dmc_id)
1007 {
1008 	struct intel_display *display = dmc->display;
1009 	u32 start_range, end_range;
1010 	int i;
1011 
1012 	if (header_ver == 1) {
1013 		start_range = DMC_MMIO_START_RANGE;
1014 		end_range = DMC_MMIO_END_RANGE;
1015 	} else if (dmc_id == DMC_FW_MAIN) {
1016 		start_range = TGL_MAIN_MMIO_START;
1017 		end_range = TGL_MAIN_MMIO_END;
1018 	} else if (DISPLAY_VER(display) >= 13) {
1019 		start_range = ADLP_PIPE_MMIO_START;
1020 		end_range = ADLP_PIPE_MMIO_END;
1021 	} else if (DISPLAY_VER(display) >= 12) {
1022 		start_range = TGL_PIPE_MMIO_START(dmc_id);
1023 		end_range = TGL_PIPE_MMIO_END(dmc_id);
1024 	} else {
1025 		drm_warn(display->drm, "Unknown mmio range for sanity check\n");
1026 		return false;
1027 	}
1028 
1029 	for (i = 0; i < mmio_count; i++) {
1030 		if (mmioaddr[i] < start_range || mmioaddr[i] > end_range)
1031 			return false;
1032 	}
1033 
1034 	return true;
1035 }
1036 
1037 static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
1038 			       const struct intel_dmc_header_base *dmc_header,
1039 			       size_t rem_size, enum intel_dmc_id dmc_id)
1040 {
1041 	struct intel_display *display = dmc->display;
1042 	struct dmc_fw_info *dmc_info = &dmc->dmc_info[dmc_id];
1043 	unsigned int header_len_bytes, dmc_header_size, payload_size, i;
1044 	const u32 *mmioaddr, *mmiodata;
1045 	u32 mmio_count, mmio_count_max, start_mmioaddr;
1046 	u8 *payload;
1047 
1048 	BUILD_BUG_ON(ARRAY_SIZE(dmc_info->mmioaddr) < DMC_V3_MAX_MMIO_COUNT ||
1049 		     ARRAY_SIZE(dmc_info->mmioaddr) < DMC_V1_MAX_MMIO_COUNT);
1050 
1051 	/*
1052 	 * Check if we can access common fields; we will check again below
1053 	 * after we have read the version
1054 	 */
1055 	if (rem_size < sizeof(struct intel_dmc_header_base))
1056 		goto error_truncated;
1057 
1058 	/* Cope with small differences between v1 and v3 */
1059 	if (dmc_header->header_ver == 3) {
1060 		const struct intel_dmc_header_v3 *v3 =
1061 			(const struct intel_dmc_header_v3 *)dmc_header;
1062 
1063 		if (rem_size < sizeof(struct intel_dmc_header_v3))
1064 			goto error_truncated;
1065 
1066 		mmioaddr = v3->mmioaddr;
1067 		mmiodata = v3->mmiodata;
1068 		mmio_count = v3->mmio_count;
1069 		mmio_count_max = DMC_V3_MAX_MMIO_COUNT;
1070 		/* header_len is in dwords */
1071 		header_len_bytes = dmc_header->header_len * 4;
1072 		start_mmioaddr = v3->start_mmioaddr;
1073 		dmc_header_size = sizeof(*v3);
1074 	} else if (dmc_header->header_ver == 1) {
1075 		const struct intel_dmc_header_v1 *v1 =
1076 			(const struct intel_dmc_header_v1 *)dmc_header;
1077 
1078 		if (rem_size < sizeof(struct intel_dmc_header_v1))
1079 			goto error_truncated;
1080 
1081 		mmioaddr = v1->mmioaddr;
1082 		mmiodata = v1->mmiodata;
1083 		mmio_count = v1->mmio_count;
1084 		mmio_count_max = DMC_V1_MAX_MMIO_COUNT;
1085 		header_len_bytes = dmc_header->header_len;
1086 		start_mmioaddr = DMC_V1_MMIO_START_RANGE;
1087 		dmc_header_size = sizeof(*v1);
1088 	} else {
1089 		drm_err(display->drm, "Unknown DMC fw header version: %u\n",
1090 			dmc_header->header_ver);
1091 		return 0;
1092 	}
1093 
1094 	if (header_len_bytes != dmc_header_size) {
1095 		drm_err(display->drm, "DMC firmware has wrong dmc header length "
1096 			"(%u bytes)\n", header_len_bytes);
1097 		return 0;
1098 	}
1099 
1100 	/* Cache the dmc header info. */
1101 	if (mmio_count > mmio_count_max) {
1102 		drm_err(display->drm, "DMC firmware has wrong mmio count %u\n", mmio_count);
1103 		return 0;
1104 	}
1105 
1106 	if (!dmc_mmio_addr_sanity_check(dmc, mmioaddr, mmio_count,
1107 					dmc_header->header_ver, dmc_id)) {
1108 		drm_err(display->drm, "DMC firmware has wrong MMIO addresses\n");
1109 		return 0;
1110 	}
1111 
1112 	drm_dbg_kms(display->drm, "DMC %d:\n", dmc_id);
1113 	for (i = 0; i < mmio_count; i++) {
1114 		dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
1115 		dmc_info->mmiodata[i] = mmiodata[i];
1116 	}
1117 
1118 	for (i = 0; i < mmio_count - 1; i++) {
1119 		u32 orig_mmiodata[2] = {
1120 			dmc_info->mmiodata[i],
1121 			dmc_info->mmiodata[i+1],
1122 		};
1123 
1124 		if (!fixup_dmc_evt(display, dmc_id,
1125 				   dmc_info->mmioaddr[i], &dmc_info->mmiodata[i],
1126 				   dmc_info->mmioaddr[i+1], &dmc_info->mmiodata[i+1]))
1127 			continue;
1128 
1129 		drm_dbg_kms(display->drm,
1130 			    " mmio[%d]: 0x%x = 0x%x->0x%x (EVT_CTL)\n",
1131 			    i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]),
1132 			    orig_mmiodata[0], dmc_info->mmiodata[i]);
1133 		drm_dbg_kms(display->drm,
1134 			    " mmio[%d]: 0x%x = 0x%x->0x%x (EVT_HTP)\n",
1135 			    i+1, i915_mmio_reg_offset(dmc_info->mmioaddr[i+1]),
1136 			    orig_mmiodata[1], dmc_info->mmiodata[i+1]);
1137 	}
1138 
1139 	for (i = 0; i < mmio_count; i++) {
1140 		drm_dbg_kms(display->drm, " mmio[%d]: 0x%x = 0x%x%s%s\n",
1141 			    i, i915_mmio_reg_offset(dmc_info->mmioaddr[i]), dmc_info->mmiodata[i],
1142 			    is_dmc_evt_ctl_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_CTL)" :
1143 			    is_dmc_evt_htp_reg(display, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_HTP)" : "",
1144 			    disable_dmc_evt(display, dmc_id, dmc_info->mmioaddr[i],
1145 					    dmc_info->mmiodata[i]) ? " (disabling)" : "");
1146 	}
1147 	dmc_info->mmio_count = mmio_count;
1148 	dmc_info->start_mmioaddr = start_mmioaddr;
1149 
1150 	rem_size -= header_len_bytes;
1151 
1152 	/* fw_size is in dwords, so multiply by 4 to convert to bytes. */
1153 	payload_size = dmc_header->fw_size * 4;
1154 	if (rem_size < payload_size)
1155 		goto error_truncated;
1156 
1157 	if (payload_size > dmc->max_fw_size) {
1158 		drm_err(display->drm, "DMC FW too big (%u bytes)\n", payload_size);
1159 		return 0;
1160 	}
1161 	dmc_info->dmc_fw_size = dmc_header->fw_size;
1162 
1163 	dmc_info->payload = kmalloc(payload_size, GFP_KERNEL);
1164 	if (!dmc_info->payload)
1165 		return 0;
1166 
1167 	payload = (u8 *)(dmc_header) + header_len_bytes;
1168 	memcpy(dmc_info->payload, payload, payload_size);
1169 
1170 	return header_len_bytes + payload_size;
1171 
1172 error_truncated:
1173 	drm_err(display->drm, "Truncated DMC firmware, refusing.\n");
1174 	return 0;
1175 }
1176 
1177 static u32
1178 parse_dmc_fw_package(struct intel_dmc *dmc,
1179 		     const struct intel_package_header *package_header,
1180 		     const struct stepping_info *si,
1181 		     size_t rem_size)
1182 {
1183 	struct intel_display *display = dmc->display;
1184 	u32 package_size = sizeof(struct intel_package_header);
1185 	u32 num_entries, max_entries;
1186 	const struct intel_fw_info *fw_info;
1187 
1188 	if (rem_size < package_size)
1189 		goto error_truncated;
1190 
1191 	if (package_header->header_ver == 1) {
1192 		max_entries = PACKAGE_MAX_FW_INFO_ENTRIES;
1193 	} else if (package_header->header_ver == 2) {
1194 		max_entries = PACKAGE_V2_MAX_FW_INFO_ENTRIES;
1195 	} else {
1196 		drm_err(display->drm, "DMC firmware has unknown header version %u\n",
1197 			package_header->header_ver);
1198 		return 0;
1199 	}
1200 
1201 	/*
1202 	 * We should always have space for max_entries,
1203 	 * even if not all are used
1204 	 */
1205 	package_size += max_entries * sizeof(struct intel_fw_info);
1206 	if (rem_size < package_size)
1207 		goto error_truncated;
1208 
1209 	if (package_header->header_len * 4 != package_size) {
1210 		drm_err(display->drm, "DMC firmware has wrong package header length "
1211 			"(%u bytes)\n", package_size);
1212 		return 0;
1213 	}
1214 
1215 	num_entries = package_header->num_entries;
1216 	if (WARN_ON(num_entries > max_entries))
1217 		num_entries = max_entries;
1218 
1219 	fw_info = (const struct intel_fw_info *)
1220 		((u8 *)package_header + sizeof(*package_header));
1221 	dmc_set_fw_offset(dmc, fw_info, num_entries, si,
1222 			  package_header->header_ver);
1223 
1224 	/* dmc_offset is in dwords */
1225 	return package_size;
1226 
1227 error_truncated:
1228 	drm_err(display->drm, "Truncated DMC firmware, refusing.\n");
1229 	return 0;
1230 }
1231 
1232 /* Return number of bytes parsed or 0 on error */
1233 static u32 parse_dmc_fw_css(struct intel_dmc *dmc,
1234 			    struct intel_css_header *css_header,
1235 			    size_t rem_size)
1236 {
1237 	struct intel_display *display = dmc->display;
1238 
1239 	if (rem_size < sizeof(struct intel_css_header)) {
1240 		drm_err(display->drm, "Truncated DMC firmware, refusing.\n");
1241 		return 0;
1242 	}
1243 
1244 	if (sizeof(struct intel_css_header) !=
1245 	    (css_header->header_len * 4)) {
1246 		drm_err(display->drm, "DMC firmware has wrong CSS header length "
1247 			"(%u bytes)\n",
1248 			(css_header->header_len * 4));
1249 		return 0;
1250 	}
1251 
1252 	dmc->version = css_header->version;
1253 
1254 	return sizeof(struct intel_css_header);
1255 }
1256 
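/*
 * Overall firmware .bin layout: a CSS header, followed by a package header
 * and its fw_info[] table, followed by a DMC header + payload per firmware
 * id, located via the (dword) offsets from fw_info[].
 */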
1257 static int parse_dmc_fw(struct intel_dmc *dmc, const struct firmware *fw)
1258 {
1259 	struct intel_display *display = dmc->display;
1260 	struct intel_css_header *css_header;
1261 	struct intel_package_header *package_header;
1262 	struct intel_dmc_header_base *dmc_header;
1263 	struct stepping_info display_info = { '*', '*'};
1264 	const struct stepping_info *si = intel_get_stepping_info(display, &display_info);
1265 	enum intel_dmc_id dmc_id;
1266 	u32 readcount = 0;
1267 	u32 r, offset;
1268 
1269 	if (!fw)
1270 		return -EINVAL;
1271 
1272 	/* Extract CSS Header information */
1273 	css_header = (struct intel_css_header *)fw->data;
1274 	r = parse_dmc_fw_css(dmc, css_header, fw->size);
1275 	if (!r)
1276 		return -EINVAL;
1277 
1278 	readcount += r;
1279 
1280 	/* Extract Package Header information */
1281 	package_header = (struct intel_package_header *)&fw->data[readcount];
1282 	r = parse_dmc_fw_package(dmc, package_header, si, fw->size - readcount);
1283 	if (!r)
1284 		return -EINVAL;
1285 
1286 	readcount += r;
1287 
1288 	for_each_dmc_id(dmc_id) {
1289 		if (!dmc->dmc_info[dmc_id].present)
1290 			continue;
1291 
1292 		offset = readcount + dmc->dmc_info[dmc_id].dmc_offset * 4;
1293 		if (offset > fw->size) {
1294 			drm_err(display->drm, "Reading beyond the fw_size\n");
1295 			continue;
1296 		}
1297 
1298 		dmc_header = (struct intel_dmc_header_base *)&fw->data[offset];
1299 		parse_dmc_fw_header(dmc, dmc_header, fw->size - offset, dmc_id);
1300 	}
1301 
1302 	if (!intel_dmc_has_payload(display)) {
1303 		drm_err(display->drm, "DMC firmware main program not found\n");
1304 		return -ENOENT;
1305 	}
1306 
1307 	return 0;
1308 }
1309 
1310 static void intel_dmc_runtime_pm_get(struct intel_display *display)
1311 {
1312 	drm_WARN_ON(display->drm, display->dmc.wakeref);
1313 	display->dmc.wakeref = intel_display_power_get(display, POWER_DOMAIN_INIT);
1314 }
1315 
1316 static void intel_dmc_runtime_pm_put(struct intel_display *display)
1317 {
1318 	intel_wakeref_t wakeref __maybe_unused =
1319 		fetch_and_zero(&display->dmc.wakeref);
1320 
1321 	intel_display_power_put(display, POWER_DOMAIN_INIT, wakeref);
1322 }
1323 
1324 static const char *dmc_fallback_path(struct intel_display *display)
1325 {
1326 	if (display->platform.alderlake_p)
1327 		return ADLP_DMC_FALLBACK_PATH;
1328 
1329 	return NULL;
1330 }
1331 
1332 static void dmc_load_work_fn(struct work_struct *work)
1333 {
1334 	struct intel_dmc *dmc = container_of(work, typeof(*dmc), work);
1335 	struct intel_display *display = dmc->display;
1336 	const struct firmware *fw = NULL;
1337 	const char *fallback_path;
1338 	int err;
1339 
1340 	err = request_firmware(&fw, dmc->fw_path, display->drm->dev);
1341 
1342 	if (err == -ENOENT && !dmc_firmware_param(display)) {
1343 		fallback_path = dmc_fallback_path(display);
1344 		if (fallback_path) {
1345 			drm_dbg_kms(display->drm, "%s not found, falling back to %s\n",
1346 				    dmc->fw_path, fallback_path);
1347 			err = request_firmware(&fw, fallback_path, display->drm->dev);
1348 			if (err == 0)
1349 				dmc->fw_path = fallback_path;
1350 		}
1351 	}
1352 
1353 	if (err) {
1354 		drm_notice(display->drm,
1355 			   "Failed to load DMC firmware %s (%pe). Disabling runtime power management.\n",
1356 			   dmc->fw_path, ERR_PTR(err));
1357 		drm_notice(display->drm, "DMC firmware homepage: %s\n",
1358 			   INTEL_DMC_FIRMWARE_URL);
1359 		return;
1360 	}
1361 
1362 	err = parse_dmc_fw(dmc, fw);
1363 	if (err) {
1364 		drm_notice(display->drm,
1365 			   "Failed to parse DMC firmware %s (%pe). Disabling runtime power management.\n",
1366 			   dmc->fw_path, ERR_PTR(err));
1367 		goto out;
1368 	}
1369 
1370 	intel_dmc_load_program(display);
1371 	intel_dmc_runtime_pm_put(display);
1372 
1373 	drm_info(display->drm, "Finished loading DMC firmware %s (v%u.%u)\n",
1374 		 dmc->fw_path, DMC_VERSION_MAJOR(dmc->version),
1375 		 DMC_VERSION_MINOR(dmc->version));
1376 
1377 out:
1378 	release_firmware(fw);
1379 }
1380 
1381 /**
1382  * intel_dmc_init() - initialize the firmware loading.
1383  * @display: display instance
1384  *
1385  * This function is called at display driver load time to read the firmware
1386  * from a .bin file and copy it into internal memory.
1387  */
1388 void intel_dmc_init(struct intel_display *display)
1389 {
1390 	struct intel_dmc *dmc;
1391 
1392 	if (!HAS_DMC(display))
1393 		return;
1394 
1395 	/*
1396 	 * Obtain a runtime pm reference, until DMC is loaded, to avoid entering
1397 	 * runtime-suspend.
1398 	 *
1399 	 * On error, we return with the rpm wakeref held to prevent runtime
1400 	 * suspend as runtime suspend *requires* a working DMC for whatever
1401 	 * reason.
1402 	 */
1403 	intel_dmc_runtime_pm_get(display);
1404 
1405 	dmc = kzalloc(sizeof(*dmc), GFP_KERNEL);
1406 	if (!dmc)
1407 		return;
1408 
1409 	dmc->display = display;
1410 
1411 	INIT_WORK(&dmc->work, dmc_load_work_fn);
1412 
1413 	dmc->fw_path = dmc_firmware_default(display, &dmc->max_fw_size);
1414 
1415 	if (dmc_firmware_param_disabled(display)) {
1416 		drm_info(display->drm, "Disabling DMC firmware and runtime PM\n");
1417 		goto out;
1418 	}
1419 
1420 	if (dmc_firmware_param(display))
1421 		dmc->fw_path = dmc_firmware_param(display);
1422 
1423 	if (!dmc->fw_path) {
1424 		drm_dbg_kms(display->drm,
1425 			    "No known DMC firmware for platform, disabling runtime PM\n");
1426 		goto out;
1427 	}
1428 
1429 	display->dmc.dmc = dmc;
1430 
1431 	drm_dbg_kms(display->drm, "Loading %s\n", dmc->fw_path);
1432 	queue_work(display->wq.unordered, &dmc->work);
1433 
1434 	return;
1435 
1436 out:
1437 	kfree(dmc);
1438 }
1439 
1440 /**
1441  * intel_dmc_suspend() - prepare DMC firmware before system suspend
1442  * @display: display instance
1443  *
1444  * Prepare the DMC firmware before entering system suspend. This includes
1445  * flushing pending work items and releasing any resources acquired during
1446  * init.
1447  */
1448 void intel_dmc_suspend(struct intel_display *display)
1449 {
1450 	struct intel_dmc *dmc = display_to_dmc(display);
1451 
1452 	if (!HAS_DMC(display))
1453 		return;
1454 
1455 	if (dmc)
1456 		flush_work(&dmc->work);
1457 
1458 	/* Drop the reference held in case DMC isn't loaded. */
1459 	if (!intel_dmc_has_payload(display))
1460 		intel_dmc_runtime_pm_put(display);
1461 }
1462 
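/**
 * intel_dmc_wait_fw_load() - wait for DMC firmware loading to finish
 * @display: display instance
 *
 * Flush the asynchronous firmware loading work queued from intel_dmc_init().
 */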
1463 void intel_dmc_wait_fw_load(struct intel_display *display)
1464 {
1465 	struct intel_dmc *dmc = display_to_dmc(display);
1466 
1467 	if (!HAS_DMC(display))
1468 		return;
1469 
1470 	if (dmc)
1471 		flush_work(&dmc->work);
1472 }
1473 
1474 /**
1475  * intel_dmc_resume() - init DMC firmware during system resume
1476  * @display: display instance
1477  *
1478  * Reinitialize the DMC firmware during system resume, reacquiring any
1479  * resources released in intel_dmc_suspend().
1480  */
1481 void intel_dmc_resume(struct intel_display *display)
1482 {
1483 	if (!HAS_DMC(display))
1484 		return;
1485 
1486 	/*
1487 	 * Reacquire the reference to keep RPM disabled in case DMC isn't
1488 	 * loaded.
1489 	 */
1490 	if (!intel_dmc_has_payload(display))
1491 		intel_dmc_runtime_pm_get(display);
1492 }
1493 
1494 /**
1495  * intel_dmc_fini() - unload the DMC firmware.
1496  * @display: display instance
1497  *
1498  * Firmware unloading includes freeing the internal memory and resetting the
1499  * firmware loading status.
1500  */
1501 void intel_dmc_fini(struct intel_display *display)
1502 {
1503 	struct intel_dmc *dmc = display_to_dmc(display);
1504 	enum intel_dmc_id dmc_id;
1505 
1506 	if (!HAS_DMC(display))
1507 		return;
1508 
1509 	intel_dmc_suspend(display);
1510 	drm_WARN_ON(display->drm, display->dmc.wakeref);
1511 
1512 	if (dmc) {
1513 		for_each_dmc_id(dmc_id)
1514 			kfree(dmc->dmc_info[dmc_id].payload);
1515 
1516 		kfree(dmc);
1517 		display->dmc.dmc = NULL;
1518 	}
1519 }
1520 
1521 struct intel_dmc_snapshot {
1522 	bool initialized;
1523 	bool loaded;
1524 	u32 version;
1525 };
1526 
1527 struct intel_dmc_snapshot *intel_dmc_snapshot_capture(struct intel_display *display)
1528 {
1529 	struct intel_dmc *dmc = display_to_dmc(display);
1530 	struct intel_dmc_snapshot *snapshot;
1531 
1532 	if (!HAS_DMC(display))
1533 		return NULL;
1534 
1535 	snapshot = kzalloc(sizeof(*snapshot), GFP_ATOMIC);
1536 	if (!snapshot)
1537 		return NULL;
1538 
1539 	snapshot->initialized = dmc;
1540 	snapshot->loaded = intel_dmc_has_payload(display);
1541 	if (dmc)
1542 		snapshot->version = dmc->version;
1543 
1544 	return snapshot;
1545 }
1546 
1547 void intel_dmc_snapshot_print(const struct intel_dmc_snapshot *snapshot, struct drm_printer *p)
1548 {
1549 	if (!snapshot)
1550 		return;
1551 
1552 	drm_printf(p, "DMC initialized: %s\n", str_yes_no(snapshot->initialized));
1553 	drm_printf(p, "DMC loaded: %s\n", str_yes_no(snapshot->loaded));
1554 	if (snapshot->initialized)
1555 		drm_printf(p, "DMC fw version: %d.%d\n",
1556 			   DMC_VERSION_MAJOR(snapshot->version),
1557 			   DMC_VERSION_MINOR(snapshot->version));
1558 }
1559 
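/*
 * There is no dedicated DC6-allowed counter here; instead accumulate the DC5
 * counter delta over the periods during which DC6 was allowed.
 */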
1560 void intel_dmc_update_dc6_allowed_count(struct intel_display *display,
1561 					bool start_tracking)
1562 {
1563 	struct intel_dmc *dmc = display_to_dmc(display);
1564 	u32 dc5_cur_count;
1565 
1566 	if (DISPLAY_VER(dmc->display) < 14)
1567 		return;
1568 
1569 	dc5_cur_count = intel_de_read(dmc->display, DG1_DMC_DEBUG_DC5_COUNT);
1570 
1571 	if (!start_tracking)
1572 		dmc->dc6_allowed.count += dc5_cur_count - dmc->dc6_allowed.dc5_start;
1573 
1574 	dmc->dc6_allowed.dc5_start = dc5_cur_count;
1575 }
1576 
1577 static bool intel_dmc_get_dc6_allowed_count(struct intel_display *display, u32 *count)
1578 {
1579 	struct i915_power_domains *power_domains = &display->power.domains;
1580 	struct intel_dmc *dmc = display_to_dmc(display);
1581 	bool dc6_enabled;
1582 
1583 	if (DISPLAY_VER(display) < 14)
1584 		return false;
1585 
1586 	mutex_lock(&power_domains->lock);
1587 	dc6_enabled = intel_de_read(display, DC_STATE_EN) &
1588 		      DC_STATE_EN_UPTO_DC6;
1589 	if (dc6_enabled)
1590 		intel_dmc_update_dc6_allowed_count(display, false);
1591 
1592 	*count = dmc->dc6_allowed.count;
1593 	mutex_unlock(&power_domains->lock);
1594 
1595 	return true;
1596 }
1597 
1598 static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused)
1599 {
1600 	struct intel_display *display = m->private;
1601 	struct intel_dmc *dmc = display_to_dmc(display);
1602 	struct ref_tracker *wakeref;
1603 	i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG;
1604 	u32 dc6_allowed_count;
1605 
1606 	if (!HAS_DMC(display))
1607 		return -ENODEV;
1608 
1609 	wakeref = intel_display_rpm_get(display);
1610 
1611 	seq_printf(m, "DMC initialized: %s\n", str_yes_no(dmc));
1612 	seq_printf(m, "fw loaded: %s\n",
1613 		   str_yes_no(intel_dmc_has_payload(display)));
1614 	seq_printf(m, "path: %s\n", dmc ? dmc->fw_path : "N/A");
1615 	seq_printf(m, "Pipe A fw needed: %s\n",
1616 		   str_yes_no(DISPLAY_VER(display) >= 12));
1617 	seq_printf(m, "Pipe A fw loaded: %s\n",
1618 		   str_yes_no(has_dmc_id_fw(display, DMC_FW_PIPEA)));
1619 	seq_printf(m, "Pipe B fw needed: %s\n",
1620 		   str_yes_no(display->platform.alderlake_p ||
1621 			      DISPLAY_VER(display) >= 14));
1622 	seq_printf(m, "Pipe B fw loaded: %s\n",
1623 		   str_yes_no(has_dmc_id_fw(display, DMC_FW_PIPEB)));
1624 
1625 	if (!intel_dmc_has_payload(display))
1626 		goto out;
1627 
1628 	seq_printf(m, "version: %d.%d\n", DMC_VERSION_MAJOR(dmc->version),
1629 		   DMC_VERSION_MINOR(dmc->version));
1630 
1631 	if (DISPLAY_VER(display) >= 12) {
1632 		i915_reg_t dc3co_reg;
1633 
1634 		if (display->platform.dgfx || DISPLAY_VER(display) >= 14) {
1635 			dc3co_reg = DG1_DMC_DEBUG3;
1636 			dc5_reg = DG1_DMC_DEBUG_DC5_COUNT;
1637 		} else {
1638 			dc3co_reg = TGL_DMC_DEBUG3;
1639 			dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
1640 			dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
1641 		}
1642 
1643 		seq_printf(m, "DC3CO count: %d\n",
1644 			   intel_de_read(display, dc3co_reg));
1645 	} else {
1646 		dc5_reg = display->platform.broxton ? BXT_DMC_DC3_DC5_COUNT :
1647 			SKL_DMC_DC3_DC5_COUNT;
1648 		if (!display->platform.geminilake && !display->platform.broxton)
1649 			dc6_reg = SKL_DMC_DC5_DC6_COUNT;
1650 	}
1651 
1652 	seq_printf(m, "DC3 -> DC5 count: %d\n", intel_de_read(display, dc5_reg));
1653 
1654 	if (intel_dmc_get_dc6_allowed_count(display, &dc6_allowed_count))
1655 		seq_printf(m, "DC5 -> DC6 allowed count: %d\n",
1656 			   dc6_allowed_count);
1657 	else if (i915_mmio_reg_valid(dc6_reg))
1658 		seq_printf(m, "DC5 -> DC6 count: %d\n",
1659 			   intel_de_read(display, dc6_reg));
1660 
1661 	seq_printf(m, "program base: 0x%08x\n",
1662 		   intel_de_read(display, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)));
1663 
1664 out:
1665 	seq_printf(m, "ssp base: 0x%08x\n",
1666 		   intel_de_read(display, DMC_SSP_BASE));
1667 	seq_printf(m, "htp: 0x%08x\n", intel_de_read(display, DMC_HTP_SKL));
1668 
1669 	intel_display_rpm_put(display, wakeref);
1670 
1671 	return 0;
1672 }
1673 
1674 DEFINE_SHOW_ATTRIBUTE(intel_dmc_debugfs_status);
1675 
1676 void intel_dmc_debugfs_register(struct intel_display *display)
1677 {
1678 	debugfs_create_file("i915_dmc_info", 0444, display->drm->debugfs_root,
1679 			    display, &intel_dmc_debugfs_status_fops);
1680 }
1681 
1682 void intel_pipedmc_irq_handler(struct intel_display *display, enum pipe pipe)
1683 {
1684 	struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe);
1685 	u32 tmp = 0, int_vector;
1686 
1687 	if (DISPLAY_VER(display) >= 20) {
1688 		tmp = intel_de_read(display, PIPEDMC_INTERRUPT(pipe));
1689 		intel_de_write(display, PIPEDMC_INTERRUPT(pipe), tmp);
1690 
1691 		if (tmp & PIPEDMC_FLIPQ_PROG_DONE) {
1692 			spin_lock(&display->drm->event_lock);
1693 
1694 			if (crtc->flipq_event) {
1695 				/*
1696 				 * Update vblank counter/timestamp in case it
1697 				 * hasn't been done yet for this frame.
1698 				 */
1699 				drm_crtc_accurate_vblank_count(&crtc->base);
1700 
1701 				drm_crtc_send_vblank_event(&crtc->base, crtc->flipq_event);
1702 				crtc->flipq_event = NULL;
1703 			}
1704 
1705 			spin_unlock(&display->drm->event_lock);
1706 		}
1707 
1708 		if (tmp & PIPEDMC_ATS_FAULT)
1709 			drm_err_ratelimited(display->drm, "[CRTC:%d:%s] PIPEDMC ATS fault\n",
1710 					    crtc->base.base.id, crtc->base.name);
1711 		if (tmp & PIPEDMC_GTT_FAULT)
1712 			drm_err_ratelimited(display->drm, "[CRTC:%d:%s] PIPEDMC GTT fault\n",
1713 					    crtc->base.base.id, crtc->base.name);
1714 		if (tmp & PIPEDMC_ERROR)
1715 			drm_err(display->drm, "[CRTC:%d:%s] PIPEDMC error\n",
1716 				crtc->base.base.id, crtc->base.name);
1717 	}
1718 
1719 	int_vector = intel_de_read(display, PIPEDMC_STATUS(pipe)) & PIPEDMC_INT_VECTOR_MASK;
1720 	if (tmp == 0 && int_vector != 0)
1721 		drm_err(display->drm, "[CRTC:%d:%s] PIPEDMC interrupt vector 0x%x\n",
1722 			crtc->base.base.id, crtc->base.name, int_vector);
1723 }
1724 
1725 void intel_pipedmc_enable_event(struct intel_crtc *crtc,
1726 				enum pipedmc_event_id event)
1727 {
1728 	struct intel_display *display = to_intel_display(crtc);
1729 	enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(crtc->pipe);
1730 
1731 	dmc_configure_event(display, dmc_id, event, true);
1732 }
1733 
1734 void intel_pipedmc_disable_event(struct intel_crtc *crtc,
1735 				 enum pipedmc_event_id event)
1736 {
1737 	struct intel_display *display = to_intel_display(crtc);
1738 	enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(crtc->pipe);
1739 
1740 	dmc_configure_event(display, dmc_id, event, false);
1741 }
1742 
1743 u32 intel_pipedmc_start_mmioaddr(struct intel_crtc *crtc)
1744 {
1745 	struct intel_display *display = to_intel_display(crtc);
1746 	struct intel_dmc *dmc = display_to_dmc(display);
1747 	enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(crtc->pipe);
1748 
1749 	return dmc ? dmc->dmc_info[dmc_id].start_mmioaddr : 0;
1750 }
1751