1 /*
2  * Copyright © 2013 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include <linux/pm_runtime.h>
25 
26 #include <drm/drm_managed.h>
27 
28 #include "display/intel_display_core.h"
29 #include "gt/intel_engine_regs.h"
30 #include "gt/intel_gt.h"
31 #include "gt/intel_gt_regs.h"
32 
33 #include "i915_drv.h"
34 #include "i915_iosf_mbi.h"
35 #include "i915_reg.h"
36 #include "i915_vgpu.h"
37 #include "i915_wait_util.h"
38 #include "intel_uncore_trace.h"
39 
40 #define FORCEWAKE_ACK_TIMEOUT_MS 50
41 #define GT_FIFO_TIMEOUT_MS	 10
42 
43 struct intel_uncore *to_intel_uncore(struct drm_device *drm)
44 {
45 	return &to_i915(drm)->uncore;
46 }
47 
48 #define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
49 
50 static void
51 fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
52 {
53 	uncore->fw_get_funcs->force_wake_get(uncore, fw_domains);
54 }
55 
56 void
57 intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915)
58 {
59 	spin_lock_init(&i915->mmio_debug.lock);
60 	i915->mmio_debug.unclaimed_mmio_check = 1;
61 
62 	i915->uncore.debug = &i915->mmio_debug;
63 }
64 
65 static void mmio_debug_suspend(struct intel_uncore *uncore)
66 {
67 	if (!uncore->debug)
68 		return;
69 
70 	spin_lock(&uncore->debug->lock);
71 
72 	/* Save and disable mmio debugging for the user bypass */
73 	if (!uncore->debug->suspend_count++) {
74 		uncore->debug->saved_mmio_check = uncore->debug->unclaimed_mmio_check;
75 		uncore->debug->unclaimed_mmio_check = 0;
76 	}
77 
78 	spin_unlock(&uncore->debug->lock);
79 }
80 
81 static bool check_for_unclaimed_mmio(struct intel_uncore *uncore);
82 
83 static void mmio_debug_resume(struct intel_uncore *uncore)
84 {
85 	if (!uncore->debug)
86 		return;
87 
88 	spin_lock(&uncore->debug->lock);
89 
90 	if (!--uncore->debug->suspend_count)
91 		uncore->debug->unclaimed_mmio_check = uncore->debug->saved_mmio_check;
92 
93 	if (check_for_unclaimed_mmio(uncore))
94 		drm_info(&uncore->i915->drm,
95 			 "Invalid mmio detected during user access\n");
96 
97 	spin_unlock(&uncore->debug->lock);
98 }
99 
100 static const char * const forcewake_domain_names[] = {
101 	"render",
102 	"gt",
103 	"media",
104 	"vdbox0",
105 	"vdbox1",
106 	"vdbox2",
107 	"vdbox3",
108 	"vdbox4",
109 	"vdbox5",
110 	"vdbox6",
111 	"vdbox7",
112 	"vebox0",
113 	"vebox1",
114 	"vebox2",
115 	"vebox3",
116 	"gsc",
117 };
118 
119 const char *
120 intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
121 {
122 	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
123 
124 	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
125 		return forcewake_domain_names[id];
126 
127 	WARN_ON(id);
128 
129 	return "unknown";
130 }
131 
132 #define fw_ack(d) readl((d)->reg_ack)
133 #define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
134 #define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)
135 
136 static inline void
137 fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
138 {
139 	/*
140 	 * We don't really know if the powerwell for the forcewake domain we are
141 	 * trying to reset here actually exists at this point (engines could be
142 	 * fused off in ICL+), so don't wait for acks.
143 	 */
144 	/* WaRsClearFWBitsAtReset */
145 	if (GRAPHICS_VER(d->uncore->i915) >= 12)
146 		fw_clear(d, 0xefff);
147 	else
148 		fw_clear(d, 0xffff);
149 }
150 
151 static inline void
152 fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
153 {
154 	GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
155 	d->uncore->fw_domains_timer |= d->mask;
156 	d->wake_count++;
157 	hrtimer_start_range_ns(&d->timer,
158 			       NSEC_PER_MSEC,
159 			       NSEC_PER_MSEC,
160 			       HRTIMER_MODE_REL);
161 }
162 
163 static inline int
164 __wait_for_ack(const struct intel_uncore_forcewake_domain *d,
165 	       const u32 ack,
166 	       const u32 value)
167 {
168 	return wait_for_atomic((fw_ack(d) & ack) == value,
169 			       FORCEWAKE_ACK_TIMEOUT_MS);
170 }
171 
172 static inline int
173 wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
174 	       const u32 ack)
175 {
176 	return __wait_for_ack(d, ack, 0);
177 }
178 
179 static inline int
180 wait_ack_set(const struct intel_uncore_forcewake_domain *d,
181 	     const u32 ack)
182 {
183 	return __wait_for_ack(d, ack, ack);
184 }
185 
186 static inline void
187 fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
188 {
189 	if (!wait_ack_clear(d, FORCEWAKE_KERNEL))
190 		return;
191 
192 	if (fw_ack(d) == ~0) {
193 		drm_err(&d->uncore->i915->drm,
194 			"%s: MMIO unreliable (forcewake register returns 0xFFFFFFFF)!\n",
195 			intel_uncore_forcewake_domain_to_str(d->id));
196 		intel_gt_set_wedged_async(d->uncore->gt);
197 	} else {
198 		drm_err(&d->uncore->i915->drm,
199 			"%s: timed out waiting for forcewake ack to clear.\n",
200 			intel_uncore_forcewake_domain_to_str(d->id));
201 	}
202 
203 	add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
204 }
205 
206 enum ack_type {
207 	ACK_CLEAR = 0,
208 	ACK_SET
209 };
210 
211 static int
212 fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
213 				 const enum ack_type type)
214 {
215 	const u32 ack_bit = FORCEWAKE_KERNEL;
216 	const u32 value = type == ACK_SET ? ack_bit : 0;
217 	unsigned int pass;
218 	bool ack_detected;
219 
220 	/*
221 	 * There is a possibility of the driver's wake request colliding
222 	 * with the hardware's own wake requests, and that can cause the
223 	 * hardware to not deliver the driver's ack message.
224 	 *
225 	 * Use a fallback bit toggle to kick the gpu state machine
226 	 * in the hope that the original ack will be delivered along with
227 	 * the fallback ack.
228 	 *
229 	 * This workaround is described in HSDES #1604254524 and it's known as:
230 	 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
231 	 * although the name is a bit misleading.
232 	 */
233 
234 	pass = 1;
235 	do {
236 		wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);
237 
238 		fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
239 		/* Give gt some time to relax before the polling frenzy */
240 		udelay(10 * pass);
241 		wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);
242 
243 		ack_detected = (fw_ack(d) & ack_bit) == value;
244 
245 		fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
246 	} while (!ack_detected && pass++ < 10);
247 
248 	drm_dbg(&d->uncore->i915->drm,
249 		"%s had to use fallback to %s ack, 0x%x (passes %u)\n",
250 		intel_uncore_forcewake_domain_to_str(d->id),
251 		type == ACK_SET ? "set" : "clear",
252 		fw_ack(d),
253 		pass);
254 
255 	return ack_detected ? 0 : -ETIMEDOUT;
256 }
257 
258 static inline void
259 fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
260 {
261 	if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
262 		return;
263 
264 	if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
265 		fw_domain_wait_ack_clear(d);
266 }
267 
268 static inline void
269 fw_domain_get(const struct intel_uncore_forcewake_domain *d)
270 {
271 	fw_set(d, FORCEWAKE_KERNEL);
272 }
273 
274 static inline void
275 fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
276 {
277 	if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
278 		drm_err(&d->uncore->i915->drm,
279 			"%s: timed out waiting for forcewake ack request.\n",
280 			intel_uncore_forcewake_domain_to_str(d->id));
281 		add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
282 	}
283 }
284 
285 static inline void
286 fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
287 {
288 	if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
289 		return;
290 
291 	if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
292 		fw_domain_wait_ack_set(d);
293 }
294 
295 static inline void
296 fw_domain_put(const struct intel_uncore_forcewake_domain *d)
297 {
298 	fw_clear(d, FORCEWAKE_KERNEL);
299 }
300 
301 static void
302 fw_domains_get_normal(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
303 {
304 	struct intel_uncore_forcewake_domain *d;
305 	unsigned int tmp;
306 
307 	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
308 
309 	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
310 		fw_domain_wait_ack_clear(d);
311 		fw_domain_get(d);
312 	}
313 
314 	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
315 		fw_domain_wait_ack_set(d);
316 
317 	uncore->fw_domains_active |= fw_domains;
318 }
319 
320 static void
321 fw_domains_get_with_fallback(struct intel_uncore *uncore,
322 			     enum forcewake_domains fw_domains)
323 {
324 	struct intel_uncore_forcewake_domain *d;
325 	unsigned int tmp;
326 
327 	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
328 
329 	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
330 		fw_domain_wait_ack_clear_fallback(d);
331 		fw_domain_get(d);
332 	}
333 
334 	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
335 		fw_domain_wait_ack_set_fallback(d);
336 
337 	uncore->fw_domains_active |= fw_domains;
338 }
339 
340 static void
341 fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
342 {
343 	struct intel_uncore_forcewake_domain *d;
344 	unsigned int tmp;
345 
346 	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
347 
348 	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
349 		fw_domain_put(d);
350 
351 	uncore->fw_domains_active &= ~fw_domains;
352 }
353 
354 static void
355 fw_domains_reset(struct intel_uncore *uncore,
356 		 enum forcewake_domains fw_domains)
357 {
358 	struct intel_uncore_forcewake_domain *d;
359 	unsigned int tmp;
360 
361 	if (!fw_domains)
362 		return;
363 
364 	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
365 
366 	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
367 		fw_domain_reset(d);
368 }
369 
370 static inline u32 gt_thread_status(struct intel_uncore *uncore)
371 {
372 	u32 val;
373 
374 	val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
375 	val &= GEN6_GT_THREAD_STATUS_CORE_MASK;
376 
377 	return val;
378 }
379 
380 static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
381 {
382 	/*
383 	 * w/a for a sporadic read returning 0 by waiting for the GT
384 	 * thread to wake up.
385 	 */
386 	drm_WARN_ONCE(&uncore->i915->drm,
387 		      wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
388 		      "GT thread status wait timed out\n");
389 }
390 
391 static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
392 					      enum forcewake_domains fw_domains)
393 {
394 	fw_domains_get_normal(uncore, fw_domains);
395 
396 	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
397 	__gen6_gt_wait_for_thread_c0(uncore);
398 }
399 
400 static inline u32 fifo_free_entries(struct intel_uncore *uncore)
401 {
402 	u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);
403 
404 	return count & GT_FIFO_FREE_ENTRIES_MASK;
405 }
406 
407 static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
408 {
409 	u32 n;
410 
411 	/* On VLV, the FIFO is shared by both SW and HW,
412 	 * so we need to read FREE_ENTRIES every time. */
413 	if (IS_VALLEYVIEW(uncore->i915))
414 		n = fifo_free_entries(uncore);
415 	else
416 		n = uncore->fifo_count;
417 
418 	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
419 		if (wait_for_atomic((n = fifo_free_entries(uncore)) >
420 				    GT_FIFO_NUM_RESERVED_ENTRIES,
421 				    GT_FIFO_TIMEOUT_MS)) {
422 			drm_dbg(&uncore->i915->drm,
423 				"GT_FIFO timeout, entries: %u\n", n);
424 			return;
425 		}
426 	}
427 
428 	uncore->fifo_count = n - 1;
429 }
430 
431 static enum hrtimer_restart
432 intel_uncore_fw_release_timer(struct hrtimer *timer)
433 {
434 	struct intel_uncore_forcewake_domain *domain =
435 	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
436 	struct intel_uncore *uncore = domain->uncore;
437 	unsigned long irqflags;
438 
439 	assert_rpm_device_not_suspended(uncore->rpm);
440 
441 	if (xchg(&domain->active, false))
442 		return HRTIMER_RESTART;
443 
444 	spin_lock_irqsave(&uncore->lock, irqflags);
445 
446 	uncore->fw_domains_timer &= ~domain->mask;
447 
448 	GEM_BUG_ON(!domain->wake_count);
449 	if (--domain->wake_count == 0)
450 		fw_domains_put(uncore, domain->mask);
451 
452 	spin_unlock_irqrestore(&uncore->lock, irqflags);
453 
454 	return HRTIMER_NORESTART;
455 }
456 
457 /* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
458 static unsigned int
459 intel_uncore_forcewake_reset(struct intel_uncore *uncore)
460 {
461 	unsigned long irqflags;
462 	struct intel_uncore_forcewake_domain *domain;
463 	int retry_count = 100;
464 	enum forcewake_domains fw, active_domains;
465 
466 	iosf_mbi_assert_punit_acquired();
467 
468 	/* Hold uncore.lock across reset to prevent any register access
469 	 * with forcewake not set correctly. Wait until all pending
470 	 * timers have run before taking the lock.
471 	 */
472 	while (1) {
473 		unsigned int tmp;
474 
475 		active_domains = 0;
476 
477 		for_each_fw_domain(domain, uncore, tmp) {
478 			smp_store_mb(domain->active, false);
479 			if (hrtimer_cancel(&domain->timer) == 0)
480 				continue;
481 
482 			intel_uncore_fw_release_timer(&domain->timer);
483 		}
484 
485 		spin_lock_irqsave(&uncore->lock, irqflags);
486 
487 		for_each_fw_domain(domain, uncore, tmp) {
488 			if (hrtimer_active(&domain->timer))
489 				active_domains |= domain->mask;
490 		}
491 
492 		if (active_domains == 0)
493 			break;
494 
495 		if (--retry_count == 0) {
496 			drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
497 			break;
498 		}
499 
500 		spin_unlock_irqrestore(&uncore->lock, irqflags);
501 		cond_resched();
502 	}
503 
504 	drm_WARN_ON(&uncore->i915->drm, active_domains);
505 
506 	fw = uncore->fw_domains_active;
507 	if (fw)
508 		fw_domains_put(uncore, fw);
509 
510 	fw_domains_reset(uncore, uncore->fw_domains);
511 	assert_forcewakes_inactive(uncore);
512 
513 	spin_unlock_irqrestore(&uncore->lock, irqflags);
514 
515 	return fw; /* track the lost user forcewake domains */
516 }
517 
518 static bool
519 fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
520 {
521 	u32 dbg;
522 
523 	dbg = __raw_uncore_read32(uncore, FPGA_DBG);
524 	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
525 		return false;
526 
527 	/*
528 	 * Bugs in PCI programming (or failing hardware) can occasionally cause
529 	 * us to lose access to the MMIO BAR.  When this happens, register
530 	 * reads will come back with 0xFFFFFFFF for every register and things
531 	 * go bad very quickly.  Let's try to detect that special case and at
532 	 * least try to print a more informative message about what has
533 	 * happened.
534 	 *
535 	 * During normal operation the FPGA_DBG register has several unused
536 	 * bits that will always read back as 0's so we can use them as canaries
537 	 * to recognize when MMIO accesses are just busted.
538 	 */
539 	if (unlikely(dbg == ~0))
540 		drm_err(&uncore->i915->drm,
541 			"Lost access to MMIO BAR; all registers now read back as 0xFFFFFFFF!\n");
542 
543 	__raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
544 
545 	return true;
546 }
547 
548 static bool
549 vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
550 {
551 	u32 cer;
552 
553 	cer = __raw_uncore_read32(uncore, CLAIM_ER);
554 	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
555 		return false;
556 
557 	__raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);
558 
559 	return true;
560 }
561 
562 static bool
563 gen6_check_for_fifo_debug(struct intel_uncore *uncore)
564 {
565 	u32 fifodbg;
566 
567 	fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);
568 
569 	if (unlikely(fifodbg)) {
570 		drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x%08x\n", fifodbg);
571 		__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
572 	}
573 
574 	return fifodbg;
575 }
576 
577 static bool
578 check_for_unclaimed_mmio(struct intel_uncore *uncore)
579 {
580 	bool ret = false;
581 
582 	lockdep_assert_held(&uncore->debug->lock);
583 
584 	if (uncore->debug->suspend_count)
585 		return false;
586 
587 	if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
588 		ret |= fpga_check_for_unclaimed_mmio(uncore);
589 
590 	if (intel_uncore_has_dbg_unclaimed(uncore))
591 		ret |= vlv_check_for_unclaimed_mmio(uncore);
592 
593 	if (intel_uncore_has_fifo(uncore))
594 		ret |= gen6_check_for_fifo_debug(uncore);
595 
596 	return ret;
597 }
598 
599 static void forcewake_early_sanitize(struct intel_uncore *uncore,
600 				     unsigned int restore_forcewake)
601 {
602 	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
603 
604 	/* WaDisableShadowRegForCpd:chv */
605 	if (IS_CHERRYVIEW(uncore->i915)) {
606 		__raw_uncore_write32(uncore, GTFIFOCTL,
607 				     __raw_uncore_read32(uncore, GTFIFOCTL) |
608 				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
609 				     GT_FIFO_CTL_RC6_POLICY_STALL);
610 	}
611 
612 	iosf_mbi_punit_acquire();
613 	intel_uncore_forcewake_reset(uncore);
614 	if (restore_forcewake) {
615 		spin_lock_irq(&uncore->lock);
616 		fw_domains_get(uncore, restore_forcewake);
617 
618 		if (intel_uncore_has_fifo(uncore))
619 			uncore->fifo_count = fifo_free_entries(uncore);
620 		spin_unlock_irq(&uncore->lock);
621 	}
622 	iosf_mbi_punit_release();
623 }
624 
625 void intel_uncore_suspend(struct intel_uncore *uncore)
626 {
627 	if (!intel_uncore_has_forcewake(uncore))
628 		return;
629 
630 	iosf_mbi_punit_acquire();
631 	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
632 		&uncore->pmic_bus_access_nb);
633 	uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
634 	iosf_mbi_punit_release();
635 }
636 
637 void intel_uncore_resume_early(struct intel_uncore *uncore)
638 {
639 	unsigned int restore_forcewake;
640 
641 	if (intel_uncore_unclaimed_mmio(uncore))
642 		drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");
643 
644 	if (!intel_uncore_has_forcewake(uncore))
645 		return;
646 
647 	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
648 	forcewake_early_sanitize(uncore, restore_forcewake);
649 
650 	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
651 }
652 
653 void intel_uncore_runtime_resume(struct intel_uncore *uncore)
654 {
655 	if (!intel_uncore_has_forcewake(uncore))
656 		return;
657 
658 	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
659 }
660 
661 static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
662 					 enum forcewake_domains fw_domains)
663 {
664 	struct intel_uncore_forcewake_domain *domain;
665 	unsigned int tmp;
666 
667 	fw_domains &= uncore->fw_domains;
668 
669 	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
670 		if (domain->wake_count++) {
671 			fw_domains &= ~domain->mask;
672 			domain->active = true;
673 		}
674 	}
675 
676 	if (fw_domains)
677 		fw_domains_get(uncore, fw_domains);
678 }
679 
680 /**
681  * intel_uncore_forcewake_get - grab forcewake domain references
682  * @uncore: the intel_uncore structure
683  * @fw_domains: forcewake domains to get reference on
684  *
685  * This function can be used to get GT's forcewake domain references.
686  * Normal register access will handle the forcewake domains automatically.
687  * However, if some sequence requires the GT to not power down particular
688  * forcewake domains, this function should be called at the beginning of the
689  * sequence, and the reference should subsequently be dropped by a symmetric
690  * call to intel_uncore_forcewake_put(). Usually the caller wants all the
691  * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
692  */
693 void intel_uncore_forcewake_get(struct intel_uncore *uncore,
694 				enum forcewake_domains fw_domains)
695 {
696 	unsigned long irqflags;
697 
698 	if (!uncore->fw_get_funcs)
699 		return;
700 
701 	assert_rpm_wakelock_held(uncore->rpm);
702 
703 	spin_lock_irqsave(&uncore->lock, irqflags);
704 	__intel_uncore_forcewake_get(uncore, fw_domains);
705 	spin_unlock_irqrestore(&uncore->lock, irqflags);
706 }
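
/*
 * Example usage (an illustrative sketch, not code from this file): a caller
 * that needs the render powerwell held across a multi-register sequence and
 * already holds the runtime-PM wakeref required above; reg_a/reg_b and
 * val_a/val_b are placeholder names.
 *
 *	intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER);
 *	intel_uncore_write_fw(uncore, reg_a, val_a);
 *	intel_uncore_write_fw(uncore, reg_b, val_b);
 *	intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER);
 *
 * The *_fw accessors skip the per-access forcewake handling, which is safe
 * here because the explicit domain reference keeps the powerwell up for the
 * whole sequence.
 */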
707 
708 /**
709  * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
710  * @uncore: the intel_uncore structure
711  *
712  * This function is a wrapper around intel_uncore_forcewake_get() to acquire
713  * the GT powerwell and in the process disable our debugging for the
714  * duration of userspace's bypass.
715  */
716 void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
717 {
718 	spin_lock_irq(&uncore->lock);
719 	if (!uncore->user_forcewake_count++) {
720 		intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
721 		mmio_debug_suspend(uncore);
722 	}
723 	spin_unlock_irq(&uncore->lock);
724 }
725 
726 /**
727  * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
728  * @uncore: the intel_uncore structure
729  *
730  * This function complements intel_uncore_forcewake_user_get() and releases
731  * the GT powerwell taken on behalf of the userspace bypass.
732  */
733 void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
734 {
735 	spin_lock_irq(&uncore->lock);
736 	if (!--uncore->user_forcewake_count) {
737 		mmio_debug_resume(uncore);
738 		intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
739 	}
740 	spin_unlock_irq(&uncore->lock);
741 }
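
/*
 * Illustrative pairing (a sketch only; bypass_open()/bypass_release() are
 * hypothetical hooks standing in for whatever interface hands MMIO control
 * to userspace, e.g. a debugfs open/release pair):
 *
 *	static int bypass_open(struct intel_uncore *uncore)
 *	{
 *		intel_uncore_forcewake_user_get(uncore);
 *		return 0;
 *	}
 *
 *	static int bypass_release(struct intel_uncore *uncore)
 *	{
 *		intel_uncore_forcewake_user_put(uncore);
 *		return 0;
 *	}
 *
 * The user_forcewake_count reference count lets several such clients overlap;
 * mmio debugging resumes only when the last one releases the bypass.
 */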
742 
743 /**
744  * intel_uncore_forcewake_get__locked - grab forcewake domain references
745  * @uncore: the intel_uncore structure
746  * @fw_domains: forcewake domains to get reference on
747  *
748  * See intel_uncore_forcewake_get(). This variant places the onus
749  * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
750  */
751 void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
752 					enum forcewake_domains fw_domains)
753 {
754 	lockdep_assert_held(&uncore->lock);
755 
756 	if (!uncore->fw_get_funcs)
757 		return;
758 
759 	__intel_uncore_forcewake_get(uncore, fw_domains);
760 }
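
/*
 * Minimal sketch of the __locked contract (illustrative only): the caller
 * owns uncore->lock around both the get and the matching put, as the
 * forcewake_user helpers above do:
 *
 *	spin_lock_irq(&uncore->lock);
 *	intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
 *	... touch registers or state already protected by uncore->lock ...
 *	intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
 *	spin_unlock_irq(&uncore->lock);
 */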
761 
762 static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
763 					 enum forcewake_domains fw_domains,
764 					 bool delayed)
765 {
766 	struct intel_uncore_forcewake_domain *domain;
767 	unsigned int tmp;
768 
769 	fw_domains &= uncore->fw_domains;
770 
771 	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
772 		GEM_BUG_ON(!domain->wake_count);
773 
774 		if (--domain->wake_count) {
775 			domain->active = true;
776 			continue;
777 		}
778 
779 		if (delayed &&
780 		    !(domain->uncore->fw_domains_timer & domain->mask))
781 			fw_domain_arm_timer(domain);
782 		else
783 			fw_domains_put(uncore, domain->mask);
784 	}
785 }
786 
787 /**
788  * intel_uncore_forcewake_put - release a forcewake domain reference
789  * @uncore: the intel_uncore structure
790  * @fw_domains: forcewake domains to put references
791  *
792  * This function drops the device-level forcewakes for specified
793  * domains obtained by intel_uncore_forcewake_get().
794  */
795 void intel_uncore_forcewake_put(struct intel_uncore *uncore,
796 				enum forcewake_domains fw_domains)
797 {
798 	unsigned long irqflags;
799 
800 	if (!uncore->fw_get_funcs)
801 		return;
802 
803 	spin_lock_irqsave(&uncore->lock, irqflags);
804 	__intel_uncore_forcewake_put(uncore, fw_domains, false);
805 	spin_unlock_irqrestore(&uncore->lock, irqflags);
806 }
807 
808 void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
809 					enum forcewake_domains fw_domains)
810 {
811 	unsigned long irqflags;
812 
813 	if (!uncore->fw_get_funcs)
814 		return;
815 
816 	spin_lock_irqsave(&uncore->lock, irqflags);
817 	__intel_uncore_forcewake_put(uncore, fw_domains, true);
818 	spin_unlock_irqrestore(&uncore->lock, irqflags);
819 }
820 
821 /**
822  * intel_uncore_forcewake_flush - flush the delayed release
823  * @uncore: the intel_uncore structure
824  * @fw_domains: forcewake domains to flush
825  */
826 void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
827 				  enum forcewake_domains fw_domains)
828 {
829 	struct intel_uncore_forcewake_domain *domain;
830 	unsigned int tmp;
831 
832 	if (!uncore->fw_get_funcs)
833 		return;
834 
835 	fw_domains &= uncore->fw_domains;
836 	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
837 		WRITE_ONCE(domain->active, false);
838 		if (hrtimer_cancel(&domain->timer))
839 			intel_uncore_fw_release_timer(&domain->timer);
840 	}
841 }
842 
843 /**
844  * intel_uncore_forcewake_put__locked - release forcewake domain references
845  * @uncore: the intel_uncore structure
846  * @fw_domains: forcewake domains to put references
847  *
848  * See intel_uncore_forcewake_put(). This variant places the onus
849  * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
850  */
851 void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
852 					enum forcewake_domains fw_domains)
853 {
854 	lockdep_assert_held(&uncore->lock);
855 
856 	if (!uncore->fw_get_funcs)
857 		return;
858 
859 	__intel_uncore_forcewake_put(uncore, fw_domains, false);
860 }
861 
862 void assert_forcewakes_inactive(struct intel_uncore *uncore)
863 {
864 	if (!uncore->fw_get_funcs)
865 		return;
866 
867 	drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
868 		 "Expected all fw_domains to be inactive, but %08x are still on\n",
869 		 uncore->fw_domains_active);
870 }
871 
872 void assert_forcewakes_active(struct intel_uncore *uncore,
873 			      enum forcewake_domains fw_domains)
874 {
875 	struct intel_uncore_forcewake_domain *domain;
876 	unsigned int tmp;
877 
878 	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
879 		return;
880 
881 	if (!uncore->fw_get_funcs)
882 		return;
883 
884 	spin_lock_irq(&uncore->lock);
885 
886 	assert_rpm_wakelock_held(uncore->rpm);
887 
888 	fw_domains &= uncore->fw_domains;
889 	drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
890 		 "Expected %08x fw_domains to be active, but %08x are off\n",
891 		 fw_domains, fw_domains & ~uncore->fw_domains_active);
892 
893 	/*
894 	 * Check that the caller has an explicit wakeref and we don't mistake
895 	 * it for the auto wakeref.
896 	 */
897 	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
898 		unsigned int actual = READ_ONCE(domain->wake_count);
899 		unsigned int expect = 1;
900 
901 		if (uncore->fw_domains_timer & domain->mask)
902 			expect++; /* pending automatic release */
903 
904 		if (drm_WARN(&uncore->i915->drm, actual < expect,
905 			     "Expected domain %d to be held awake by caller, count=%d\n",
906 			     domain->id, actual))
907 			break;
908 	}
909 
910 	spin_unlock_irq(&uncore->lock);
911 }
912 
913 /*
914  * We give fast paths for the really cool registers.  The second range includes
915  * media domains (and the GSC starting from Xe_LPM+)
916  */
917 #define NEEDS_FORCE_WAKE(reg) ({ \
918 	u32 __reg = (reg); \
919 	__reg < 0x40000 || __reg >= 0x116000; \
920 })
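
/*
 * Worked example (illustrative offsets): 0x2030 and 0x1C0030 pass the check
 * above and go on to a forcewake-table lookup, while any offset in the
 * 0x40000..0x115fff window never needs forcewake and is filtered out here
 * before the tables are consulted at all.
 */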
921 
922 static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
923 {
924 	if (offset < entry->start)
925 		return -1;
926 	else if (offset > entry->end)
927 		return 1;
928 	else
929 		return 0;
930 }
931 
932 /* Copied and "macroized" from lib/bsearch.c */
933 #define BSEARCH(key, base, num, cmp) ({                                 \
934 	unsigned int start__ = 0, end__ = (num);                        \
935 	typeof(base) result__ = NULL;                                   \
936 	while (start__ < end__) {                                       \
937 		unsigned int mid__ = start__ + (end__ - start__) / 2;   \
938 		int ret__ = (cmp)((key), (base) + mid__);               \
939 		if (ret__ < 0) {                                        \
940 			end__ = mid__;                                  \
941 		} else if (ret__ > 0) {                                 \
942 			start__ = mid__ + 1;                            \
943 		} else {                                                \
944 			result__ = (base) + mid__;                      \
945 			break;                                          \
946 		}                                                       \
947 	}                                                               \
948 	result__;                                                       \
949 })
950 
951 static enum forcewake_domains
952 find_fw_domain(struct intel_uncore *uncore, u32 offset)
953 {
954 	const struct intel_forcewake_range *entry;
955 
956 	if (IS_GSI_REG(offset))
957 		offset += uncore->gsi_offset;
958 
959 	entry = BSEARCH(offset,
960 			uncore->fw_domains_table,
961 			uncore->fw_domains_table_entries,
962 			fw_range_cmp);
963 
964 	if (!entry)
965 		return 0;
966 
967 	/*
968 	 * The list of FW domains depends on the SKU in gen11+ so we
969 	 * can't determine it statically. We use FORCEWAKE_ALL and
970 	 * translate it here to the list of available domains.
971 	 */
972 	if (entry->domains == FORCEWAKE_ALL)
973 		return uncore->fw_domains;
974 
975 	drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
976 		 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
977 		 entry->domains & ~uncore->fw_domains, offset);
978 
979 	return entry->domains;
980 }
981 
982 /*
983  * Shadowed register tables describe special register ranges that i915 is
984  * allowed to write to without acquiring forcewake.  If these registers' power
985  * wells are down, the hardware will save values written by i915 to a shadow
986  * copy and automatically transfer them into the real register the next time
987  * the power well is woken up.  Shadowing only applies to writes; forcewake
988  * must still be acquired when reading from registers in these ranges.
989  *
990  * The documentation for shadowed registers is somewhat spotty on older
991  * platforms.  However, missing registers in these lists are non-fatal; they just
992  * mean we'll wake up the hardware for some register accesses where we didn't
993  * really need to.
994  *
995  * The ranges listed in these tables must be sorted by offset.
996  *
997  * When adding new tables here, please also add them to
998  * intel_shadow_table_check() in selftests/intel_uncore.c so that they will be
999  * scanned for obvious mistakes or typos by the selftests.
1000  */
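
/*
 * Concrete illustration (not an exhaustive statement of the rules): 0x2030,
 * the render ring's RING_TAIL, appears in each of the primary-GT tables below
 * (gen8 through MTL), so __fwtable_reg_write_fw_domains() resolves a write to
 * it to no forcewake domains at all, while a read of the same offset still
 * goes through find_fw_domain() and may wake the render domain.
 */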
1001 
1002 static const struct i915_range gen8_shadowed_regs[] = {
1003 	{ .start =  0x2030, .end =  0x2030 },
1004 	{ .start =  0xA008, .end =  0xA00C },
1005 	{ .start = 0x12030, .end = 0x12030 },
1006 	{ .start = 0x1a030, .end = 0x1a030 },
1007 	{ .start = 0x22030, .end = 0x22030 },
1008 };
1009 
1010 static const struct i915_range gen11_shadowed_regs[] = {
1011 	{ .start =   0x2030, .end =   0x2030 },
1012 	{ .start =   0x2550, .end =   0x2550 },
1013 	{ .start =   0xA008, .end =   0xA00C },
1014 	{ .start =  0x22030, .end =  0x22030 },
1015 	{ .start =  0x22230, .end =  0x22230 },
1016 	{ .start =  0x22510, .end =  0x22550 },
1017 	{ .start = 0x1C0030, .end = 0x1C0030 },
1018 	{ .start = 0x1C0230, .end = 0x1C0230 },
1019 	{ .start = 0x1C0510, .end = 0x1C0550 },
1020 	{ .start = 0x1C4030, .end = 0x1C4030 },
1021 	{ .start = 0x1C4230, .end = 0x1C4230 },
1022 	{ .start = 0x1C4510, .end = 0x1C4550 },
1023 	{ .start = 0x1C8030, .end = 0x1C8030 },
1024 	{ .start = 0x1C8230, .end = 0x1C8230 },
1025 	{ .start = 0x1C8510, .end = 0x1C8550 },
1026 	{ .start = 0x1D0030, .end = 0x1D0030 },
1027 	{ .start = 0x1D0230, .end = 0x1D0230 },
1028 	{ .start = 0x1D0510, .end = 0x1D0550 },
1029 	{ .start = 0x1D4030, .end = 0x1D4030 },
1030 	{ .start = 0x1D4230, .end = 0x1D4230 },
1031 	{ .start = 0x1D4510, .end = 0x1D4550 },
1032 	{ .start = 0x1D8030, .end = 0x1D8030 },
1033 	{ .start = 0x1D8230, .end = 0x1D8230 },
1034 	{ .start = 0x1D8510, .end = 0x1D8550 },
1035 };
1036 
1037 static const struct i915_range gen12_shadowed_regs[] = {
1038 	{ .start =   0x2030, .end =   0x2030 },
1039 	{ .start =   0x2510, .end =   0x2550 },
1040 	{ .start =   0xA008, .end =   0xA00C },
1041 	{ .start =   0xA188, .end =   0xA188 },
1042 	{ .start =   0xA278, .end =   0xA278 },
1043 	{ .start =   0xA540, .end =   0xA56C },
1044 	{ .start =   0xC4C8, .end =   0xC4C8 },
1045 	{ .start =   0xC4D4, .end =   0xC4D4 },
1046 	{ .start =   0xC600, .end =   0xC600 },
1047 	{ .start =  0x22030, .end =  0x22030 },
1048 	{ .start =  0x22510, .end =  0x22550 },
1049 	{ .start = 0x1C0030, .end = 0x1C0030 },
1050 	{ .start = 0x1C0510, .end = 0x1C0550 },
1051 	{ .start = 0x1C4030, .end = 0x1C4030 },
1052 	{ .start = 0x1C4510, .end = 0x1C4550 },
1053 	{ .start = 0x1C8030, .end = 0x1C8030 },
1054 	{ .start = 0x1C8510, .end = 0x1C8550 },
1055 	{ .start = 0x1D0030, .end = 0x1D0030 },
1056 	{ .start = 0x1D0510, .end = 0x1D0550 },
1057 	{ .start = 0x1D4030, .end = 0x1D4030 },
1058 	{ .start = 0x1D4510, .end = 0x1D4550 },
1059 	{ .start = 0x1D8030, .end = 0x1D8030 },
1060 	{ .start = 0x1D8510, .end = 0x1D8550 },
1061 
1062 	/*
1063 	 * The rest of these ranges are specific to Xe_HP and beyond, but
1064 	 * are reserved/unused ranges on earlier gen12 platforms, so they can
1065 	 * be safely added to the gen12 table.
1066 	 */
1067 	{ .start = 0x1E0030, .end = 0x1E0030 },
1068 	{ .start = 0x1E0510, .end = 0x1E0550 },
1069 	{ .start = 0x1E4030, .end = 0x1E4030 },
1070 	{ .start = 0x1E4510, .end = 0x1E4550 },
1071 	{ .start = 0x1E8030, .end = 0x1E8030 },
1072 	{ .start = 0x1E8510, .end = 0x1E8550 },
1073 	{ .start = 0x1F0030, .end = 0x1F0030 },
1074 	{ .start = 0x1F0510, .end = 0x1F0550 },
1075 	{ .start = 0x1F4030, .end = 0x1F4030 },
1076 	{ .start = 0x1F4510, .end = 0x1F4550 },
1077 	{ .start = 0x1F8030, .end = 0x1F8030 },
1078 	{ .start = 0x1F8510, .end = 0x1F8550 },
1079 };
1080 
1081 static const struct i915_range dg2_shadowed_regs[] = {
1082 	{ .start =   0x2030, .end =   0x2030 },
1083 	{ .start =   0x2510, .end =   0x2550 },
1084 	{ .start =   0xA008, .end =   0xA00C },
1085 	{ .start =   0xA188, .end =   0xA188 },
1086 	{ .start =   0xA278, .end =   0xA278 },
1087 	{ .start =   0xA540, .end =   0xA56C },
1088 	{ .start =   0xC4C8, .end =   0xC4C8 },
1089 	{ .start =   0xC4E0, .end =   0xC4E0 },
1090 	{ .start =   0xC600, .end =   0xC600 },
1091 	{ .start =   0xC658, .end =   0xC658 },
1092 	{ .start =  0x22030, .end =  0x22030 },
1093 	{ .start =  0x22510, .end =  0x22550 },
1094 	{ .start = 0x1C0030, .end = 0x1C0030 },
1095 	{ .start = 0x1C0510, .end = 0x1C0550 },
1096 	{ .start = 0x1C4030, .end = 0x1C4030 },
1097 	{ .start = 0x1C4510, .end = 0x1C4550 },
1098 	{ .start = 0x1C8030, .end = 0x1C8030 },
1099 	{ .start = 0x1C8510, .end = 0x1C8550 },
1100 	{ .start = 0x1D0030, .end = 0x1D0030 },
1101 	{ .start = 0x1D0510, .end = 0x1D0550 },
1102 	{ .start = 0x1D4030, .end = 0x1D4030 },
1103 	{ .start = 0x1D4510, .end = 0x1D4550 },
1104 	{ .start = 0x1D8030, .end = 0x1D8030 },
1105 	{ .start = 0x1D8510, .end = 0x1D8550 },
1106 	{ .start = 0x1E0030, .end = 0x1E0030 },
1107 	{ .start = 0x1E0510, .end = 0x1E0550 },
1108 	{ .start = 0x1E4030, .end = 0x1E4030 },
1109 	{ .start = 0x1E4510, .end = 0x1E4550 },
1110 	{ .start = 0x1E8030, .end = 0x1E8030 },
1111 	{ .start = 0x1E8510, .end = 0x1E8550 },
1112 	{ .start = 0x1F0030, .end = 0x1F0030 },
1113 	{ .start = 0x1F0510, .end = 0x1F0550 },
1114 	{ .start = 0x1F4030, .end = 0x1F4030 },
1115 	{ .start = 0x1F4510, .end = 0x1F4550 },
1116 	{ .start = 0x1F8030, .end = 0x1F8030 },
1117 	{ .start = 0x1F8510, .end = 0x1F8550 },
1118 };
1119 
1120 static const struct i915_range mtl_shadowed_regs[] = {
1121 	{ .start =   0x2030, .end =   0x2030 },
1122 	{ .start =   0x2510, .end =   0x2550 },
1123 	{ .start =   0xA008, .end =   0xA00C },
1124 	{ .start =   0xA188, .end =   0xA188 },
1125 	{ .start =   0xA278, .end =   0xA278 },
1126 	{ .start =   0xA540, .end =   0xA56C },
1127 	{ .start =   0xC050, .end =   0xC050 },
1128 	{ .start =   0xC340, .end =   0xC340 },
1129 	{ .start =   0xC4C8, .end =   0xC4C8 },
1130 	{ .start =   0xC4E0, .end =   0xC4E0 },
1131 	{ .start =   0xC600, .end =   0xC600 },
1132 	{ .start =   0xC658, .end =   0xC658 },
1133 	{ .start =   0xCFD4, .end =   0xCFDC },
1134 	{ .start =  0x22030, .end =  0x22030 },
1135 	{ .start =  0x22510, .end =  0x22550 },
1136 };
1137 
1138 static const struct i915_range xelpmp_shadowed_regs[] = {
1139 	{ .start = 0x1C0030, .end = 0x1C0030 },
1140 	{ .start = 0x1C0510, .end = 0x1C0550 },
1141 	{ .start = 0x1C8030, .end = 0x1C8030 },
1142 	{ .start = 0x1C8510, .end = 0x1C8550 },
1143 	{ .start = 0x1D0030, .end = 0x1D0030 },
1144 	{ .start = 0x1D0510, .end = 0x1D0550 },
1145 	{ .start = 0x38A008, .end = 0x38A00C },
1146 	{ .start = 0x38A188, .end = 0x38A188 },
1147 	{ .start = 0x38A278, .end = 0x38A278 },
1148 	{ .start = 0x38A540, .end = 0x38A56C },
1149 	{ .start = 0x38A618, .end = 0x38A618 },
1150 	{ .start = 0x38C050, .end = 0x38C050 },
1151 	{ .start = 0x38C340, .end = 0x38C340 },
1152 	{ .start = 0x38C4C8, .end = 0x38C4C8 },
1153 	{ .start = 0x38C4E0, .end = 0x38C4E4 },
1154 	{ .start = 0x38C600, .end = 0x38C600 },
1155 	{ .start = 0x38C658, .end = 0x38C658 },
1156 	{ .start = 0x38CFD4, .end = 0x38CFDC },
1157 };
1158 
1159 static int mmio_range_cmp(u32 key, const struct i915_range *range)
1160 {
1161 	if (key < range->start)
1162 		return -1;
1163 	else if (key > range->end)
1164 		return 1;
1165 	else
1166 		return 0;
1167 }
1168 
1169 static bool is_shadowed(struct intel_uncore *uncore, u32 offset)
1170 {
1171 	if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table))
1172 		return false;
1173 
1174 	if (IS_GSI_REG(offset))
1175 		offset += uncore->gsi_offset;
1176 
1177 	return BSEARCH(offset,
1178 		       uncore->shadowed_reg_table,
1179 		       uncore->shadowed_reg_table_entries,
1180 		       mmio_range_cmp);
1181 }
1182 
1183 static enum forcewake_domains
1184 gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
1185 {
1186 	return FORCEWAKE_RENDER;
1187 }
1188 
1189 #define __fwtable_reg_read_fw_domains(uncore, offset) \
1190 ({ \
1191 	enum forcewake_domains __fwd = 0; \
1192 	if (NEEDS_FORCE_WAKE((offset))) \
1193 		__fwd = find_fw_domain(uncore, offset); \
1194 	__fwd; \
1195 })
1196 
1197 #define __fwtable_reg_write_fw_domains(uncore, offset) \
1198 ({ \
1199 	enum forcewake_domains __fwd = 0; \
1200 	const u32 __offset = (offset); \
1201 	if (NEEDS_FORCE_WAKE((__offset)) && !is_shadowed(uncore, __offset)) \
1202 		__fwd = find_fw_domain(uncore, __offset); \
1203 	__fwd; \
1204 })
1205 
1206 #define GEN_FW_RANGE(s, e, d) \
1207 	{ .start = (s), .end = (e), .domains = (d) }
1208 
1209 /*
1210  * All platforms' forcewake tables below must be sorted by offset ranges.
1211  * Furthermore, newly added forcewake tables should be "watertight" and have
1212  * no gaps between ranges.
1213  *
1214  * When there are multiple consecutive ranges listed in the bspec with
1215  * the same forcewake domain, it is customary to combine them into a single
1216  * row in the tables below to keep the tables small and lookups fast.
1217  * Likewise, reserved/unused ranges may be combined with the preceding and/or
1218  * following ranges since the driver will never be making MMIO accesses in
1219  * those ranges.
1220  *
1221  * For example, if the bspec were to list:
1222  *
1223  *    ...
1224  *    0x1000 - 0x1fff:  GT
1225  *    0x2000 - 0x2cff:  GT
1226  *    0x2d00 - 0x2fff:  unused/reserved
1227  *    0x3000 - 0xffff:  GT
1228  *    ...
1229  *
1230  * these could all be represented by a single line in the code:
1231  *
1232  *   GEN_FW_RANGE(0x1000, 0xffff, FORCEWAKE_GT)
1233  *
1234  * When adding new forcewake tables here, please also add them to
1235  * intel_uncore_mock_selftests in selftests/intel_uncore.c so that they will be
1236  * scanned for obvious mistakes or typos by the selftests.
1237  */
1238 
1239 static const struct intel_forcewake_range __gen6_fw_ranges[] = {
1240 	GEN_FW_RANGE(0x0, 0x3ffff, FORCEWAKE_RENDER),
1241 };
1242 
1243 static const struct intel_forcewake_range __vlv_fw_ranges[] = {
1244 	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
1245 	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
1246 	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
1247 	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
1248 	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
1249 	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
1250 	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
1251 };
1252 
1253 static const struct intel_forcewake_range __chv_fw_ranges[] = {
1254 	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
1255 	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1256 	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1257 	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1258 	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1259 	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1260 	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
1261 	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1262 	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
1263 	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
1264 	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
1265 	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1266 	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
1267 	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
1268 	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
1269 	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
1270 };
1271 
1272 static const struct intel_forcewake_range __gen9_fw_ranges[] = {
1273 	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_GT),
1274 	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
1275 	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1276 	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
1277 	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1278 	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
1279 	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1280 	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_GT),
1281 	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
1282 	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1283 	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
1284 	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1285 	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
1286 	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
1287 	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_GT),
1288 	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
1289 	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_GT),
1290 	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1291 	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
1292 	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
1293 	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_GT),
1294 	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
1295 	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_GT),
1296 	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
1297 	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT),
1298 	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
1299 	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_GT),
1300 	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
1301 	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_GT),
1302 	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
1303 	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_GT),
1304 	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
1305 };
1306 
1307 static const struct intel_forcewake_range __gen11_fw_ranges[] = {
1308 	GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
1309 	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1310 	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
1311 	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1312 	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
1313 	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1314 	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
1315 	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1316 	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
1317 	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1318 	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
1319 	GEN_FW_RANGE(0x8800, 0x8bff, 0),
1320 	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
1321 	GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_GT),
1322 	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1323 	GEN_FW_RANGE(0x9560, 0x95ff, 0),
1324 	GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_GT),
1325 	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
1326 	GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_GT),
1327 	GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
1328 	GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_GT),
1329 	GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
1330 	GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_GT),
1331 	GEN_FW_RANGE(0x24000, 0x2407f, 0),
1332 	GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_GT),
1333 	GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER),
1334 	GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_GT),
1335 	GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER),
1336 	GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_GT),
1337 	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1338 	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
1339 	GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
1340 	GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0),
1341 	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
1342 	GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
1343 };
1344 
1345 static const struct intel_forcewake_range __gen12_fw_ranges[] = {
1346 	GEN_FW_RANGE(0x0, 0x1fff, 0), /*
1347 		0x0   -  0xaff: reserved
1348 		0xb00 - 0x1fff: always on */
1349 	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1350 	GEN_FW_RANGE(0x2700, 0x27ff, FORCEWAKE_GT),
1351 	GEN_FW_RANGE(0x2800, 0x2aff, FORCEWAKE_RENDER),
1352 	GEN_FW_RANGE(0x2b00, 0x2fff, FORCEWAKE_GT),
1353 	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1354 	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
1355 		0x4000 - 0x48ff: gt
1356 		0x4900 - 0x51ff: reserved */
1357 	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
1358 		0x5200 - 0x53ff: render
1359 		0x5400 - 0x54ff: reserved
1360 		0x5500 - 0x7fff: render */
1361 	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
1362 	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1363 	GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
1364 		0x8160 - 0x817f: reserved
1365 		0x8180 - 0x81ff: always on */
1366 	GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
1367 	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1368 	GEN_FW_RANGE(0x8500, 0x94cf, FORCEWAKE_GT), /*
1369 		0x8500 - 0x87ff: gt
1370 		0x8800 - 0x8fff: reserved
1371 		0x9000 - 0x947f: gt
1372 		0x9480 - 0x94cf: reserved */
1373 	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1374 	GEN_FW_RANGE(0x9560, 0x97ff, 0), /*
1375 		0x9560 - 0x95ff: always on
1376 		0x9600 - 0x97ff: reserved */
1377 	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
1378 	GEN_FW_RANGE(0xb000, 0xb3ff, FORCEWAKE_RENDER),
1379 	GEN_FW_RANGE(0xb400, 0xcfff, FORCEWAKE_GT), /*
1380 		0xb400 - 0xbf7f: gt
1381 		0xb480 - 0xbfff: reserved
1382 		0xc000 - 0xcfff: gt */
1383 	GEN_FW_RANGE(0xd000, 0xd7ff, 0),
1384 	GEN_FW_RANGE(0xd800, 0xd8ff, FORCEWAKE_RENDER),
1385 	GEN_FW_RANGE(0xd900, 0xdbff, FORCEWAKE_GT),
1386 	GEN_FW_RANGE(0xdc00, 0xefff, FORCEWAKE_RENDER), /*
1387 		0xdc00 - 0xddff: render
1388 		0xde00 - 0xde7f: reserved
1389 		0xde80 - 0xe8ff: render
1390 		0xe900 - 0xefff: reserved */
1391 	GEN_FW_RANGE(0xf000, 0x147ff, FORCEWAKE_GT), /*
1392 		 0xf000 - 0xffff: gt
1393 		0x10000 - 0x147ff: reserved */
1394 	GEN_FW_RANGE(0x14800, 0x1ffff, FORCEWAKE_RENDER), /*
1395 		0x14800 - 0x14fff: render
1396 		0x15000 - 0x16dff: reserved
1397 		0x16e00 - 0x1bfff: render
1398 		0x1c000 - 0x1ffff: reserved */
1399 	GEN_FW_RANGE(0x20000, 0x20fff, FORCEWAKE_MEDIA_VDBOX0),
1400 	GEN_FW_RANGE(0x21000, 0x21fff, FORCEWAKE_MEDIA_VDBOX2),
1401 	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
1402 	GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
1403 		0x24000 - 0x2407f: always on
1404 		0x24080 - 0x2417f: reserved */
1405 	GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
1406 		0x24180 - 0x241ff: gt
1407 		0x24200 - 0x249ff: reserved */
1408 	GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
1409 		0x24a00 - 0x24a7f: render
1410 		0x24a80 - 0x251ff: reserved */
1411 	GEN_FW_RANGE(0x25200, 0x255ff, FORCEWAKE_GT), /*
1412 		0x25200 - 0x252ff: gt
1413 		0x25300 - 0x255ff: reserved */
1414 	GEN_FW_RANGE(0x25600, 0x2567f, FORCEWAKE_MEDIA_VDBOX0),
1415 	GEN_FW_RANGE(0x25680, 0x259ff, FORCEWAKE_MEDIA_VDBOX2), /*
1416 		0x25680 - 0x256ff: VD2
1417 		0x25700 - 0x259ff: reserved */
1418 	GEN_FW_RANGE(0x25a00, 0x25a7f, FORCEWAKE_MEDIA_VDBOX0),
1419 	GEN_FW_RANGE(0x25a80, 0x2ffff, FORCEWAKE_MEDIA_VDBOX2), /*
1420 		0x25a80 - 0x25aff: VD2
1421 		0x25b00 - 0x2ffff: reserved */
1422 	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
1423 	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1424 	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
1425 		0x1c0000 - 0x1c2bff: VD0
1426 		0x1c2c00 - 0x1c2cff: reserved
1427 		0x1c2d00 - 0x1c2dff: VD0
1428 		0x1c2e00 - 0x1c3eff: reserved
1429 		0x1c3f00 - 0x1c3fff: VD0 */
1430 	GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
1431 	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
1432 		0x1c8000 - 0x1ca0ff: VE0
1433 		0x1ca100 - 0x1cbeff: reserved
1434 		0x1cbf00 - 0x1cbfff: VE0 */
1435 	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
1436 		0x1cc000 - 0x1ccfff: VD0
1437 		0x1cd000 - 0x1cffff: reserved */
1438 	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
1439 		0x1d0000 - 0x1d2bff: VD2
1440 		0x1d2c00 - 0x1d2cff: reserved
1441 		0x1d2d00 - 0x1d2dff: VD2
1442 		0x1d2e00 - 0x1d3eff: reserved
1443 		0x1d3f00 - 0x1d3fff: VD2 */
1444 };
1445 
1446 static const struct intel_forcewake_range __dg2_fw_ranges[] = {
1447 	GEN_FW_RANGE(0x0, 0x1fff, 0), /*
1448 		  0x0 -  0xaff: reserved
1449 		0xb00 - 0x1fff: always on */
1450 	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1451 	GEN_FW_RANGE(0x2700, 0x4aff, FORCEWAKE_GT),
1452 	GEN_FW_RANGE(0x4b00, 0x51ff, 0), /*
1453 		0x4b00 - 0x4fff: reserved
1454 		0x5000 - 0x51ff: always on */
1455 	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1456 	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
1457 	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1458 	GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
1459 		0x8160 - 0x817f: reserved
1460 		0x8180 - 0x81ff: always on */
1461 	GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
1462 	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1463 	GEN_FW_RANGE(0x8500, 0x8cff, FORCEWAKE_GT), /*
1464 		0x8500 - 0x87ff: gt
1465 		0x8800 - 0x8c7f: reserved
1466 		0x8c80 - 0x8cff: gt (DG2 only) */
1467 	GEN_FW_RANGE(0x8d00, 0x8fff, FORCEWAKE_RENDER), /*
1468 		0x8d00 - 0x8dff: render (DG2 only)
1469 		0x8e00 - 0x8fff: reserved */
1470 	GEN_FW_RANGE(0x9000, 0x94cf, FORCEWAKE_GT), /*
1471 		0x9000 - 0x947f: gt
1472 		0x9480 - 0x94cf: reserved */
1473 	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1474 	GEN_FW_RANGE(0x9560, 0x967f, 0), /*
1475 		0x9560 - 0x95ff: always on
1476 		0x9600 - 0x967f: reserved */
1477 	GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /*
1478 		0x9680 - 0x96ff: render
1479 		0x9700 - 0x97ff: reserved */
1480 	GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*
1481 		0x9800 - 0xb4ff: gt
1482 		0xb500 - 0xbfff: reserved
1483 		0xc000 - 0xcfff: gt */
1484 	GEN_FW_RANGE(0xd000, 0xd7ff, 0),
1485 	GEN_FW_RANGE(0xd800, 0xd87f, FORCEWAKE_RENDER),
1486 	GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT),
1487 	GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
1488 	GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*
1489 		0xdd00 - 0xddff: gt
1490 		0xde00 - 0xde7f: reserved */
1491 	GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*
1492 		0xde80 - 0xdfff: render
1493 		0xe000 - 0xe0ff: reserved
1494 		0xe100 - 0xe8ff: render */
1495 	GEN_FW_RANGE(0xe900, 0xffff, FORCEWAKE_GT), /*
1496 		0xe900 - 0xe9ff: gt
1497 		0xea00 - 0xefff: reserved
1498 		0xf000 - 0xffff: gt */
1499 	GEN_FW_RANGE(0x10000, 0x12fff, 0), /*
1500 		0x10000 - 0x11fff: reserved
1501 		0x12000 - 0x127ff: always on
1502 		0x12800 - 0x12fff: reserved */
1503 	GEN_FW_RANGE(0x13000, 0x131ff, FORCEWAKE_MEDIA_VDBOX0),
1504 	GEN_FW_RANGE(0x13200, 0x147ff, FORCEWAKE_MEDIA_VDBOX2), /*
1505 		0x13200 - 0x133ff: VD2 (DG2 only)
1506 		0x13400 - 0x147ff: reserved */
1507 	GEN_FW_RANGE(0x14800, 0x14fff, FORCEWAKE_RENDER),
1508 	GEN_FW_RANGE(0x15000, 0x16dff, FORCEWAKE_GT), /*
1509 		0x15000 - 0x15fff: gt (DG2 only)
1510 		0x16000 - 0x16dff: reserved */
1511 	GEN_FW_RANGE(0x16e00, 0x21fff, FORCEWAKE_RENDER), /*
1512 		0x16e00 - 0x1ffff: render
1513 		0x20000 - 0x21fff: reserved */
1514 	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
1515 	GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
1516 		0x24000 - 0x2407f: always on
1517 		0x24080 - 0x2417f: reserved */
1518 	GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
1519 		0x24180 - 0x241ff: gt
1520 		0x24200 - 0x249ff: reserved */
1521 	GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
1522 		0x24a00 - 0x24a7f: render
1523 		0x24a80 - 0x251ff: reserved */
1524 	GEN_FW_RANGE(0x25200, 0x25fff, FORCEWAKE_GT), /*
1525 		0x25200 - 0x252ff: gt
1526 		0x25300 - 0x25fff: reserved */
1527 	GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /*
1528 		0x26000 - 0x27fff: render
1529 		0x28000 - 0x29fff: reserved
1530 		0x2a000 - 0x2ffff: undocumented */
1531 	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
1532 	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1533 	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
1534 		0x1c0000 - 0x1c2bff: VD0
1535 		0x1c2c00 - 0x1c2cff: reserved
1536 		0x1c2d00 - 0x1c2dff: VD0
1537 		0x1c2e00 - 0x1c3eff: VD0
1538 		0x1c3f00 - 0x1c3fff: VD0 */
1539 	GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), /*
1540 		0x1c4000 - 0x1c6bff: VD1
1541 		0x1c6c00 - 0x1c6cff: reserved
1542 		0x1c6d00 - 0x1c6dff: VD1
1543 		0x1c6e00 - 0x1c7fff: reserved */
1544 	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
1545 		0x1c8000 - 0x1ca0ff: VE0
1546 		0x1ca100 - 0x1cbfff: reserved */
1547 	GEN_FW_RANGE(0x1cc000, 0x1ccfff, FORCEWAKE_MEDIA_VDBOX0),
1548 	GEN_FW_RANGE(0x1cd000, 0x1cdfff, FORCEWAKE_MEDIA_VDBOX2),
1549 	GEN_FW_RANGE(0x1ce000, 0x1cefff, FORCEWAKE_MEDIA_VDBOX4),
1550 	GEN_FW_RANGE(0x1cf000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX6),
1551 	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
1552 		0x1d0000 - 0x1d2bff: VD2
1553 		0x1d2c00 - 0x1d2cff: reserved
1554 		0x1d2d00 - 0x1d2dff: VD2
1555 		0x1d2e00 - 0x1d3dff: VD2
1556 		0x1d3e00 - 0x1d3eff: reserved
1557 		0x1d3f00 - 0x1d3fff: VD2 */
1558 	GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), /*
1559 		0x1d4000 - 0x1d6bff: VD3
1560 		0x1d6c00 - 0x1d6cff: reserved
1561 		0x1d6d00 - 0x1d6dff: VD3
1562 		0x1d6e00 - 0x1d7fff: reserved */
1563 	GEN_FW_RANGE(0x1d8000, 0x1dffff, FORCEWAKE_MEDIA_VEBOX1), /*
1564 		0x1d8000 - 0x1da0ff: VE1
1565 		0x1da100 - 0x1dffff: reserved */
1566 	GEN_FW_RANGE(0x1e0000, 0x1e3fff, FORCEWAKE_MEDIA_VDBOX4), /*
1567 		0x1e0000 - 0x1e2bff: VD4
1568 		0x1e2c00 - 0x1e2cff: reserved
1569 		0x1e2d00 - 0x1e2dff: VD4
1570 		0x1e2e00 - 0x1e3eff: reserved
1571 		0x1e3f00 - 0x1e3fff: VD4 */
1572 	GEN_FW_RANGE(0x1e4000, 0x1e7fff, FORCEWAKE_MEDIA_VDBOX5), /*
1573 		0x1e4000 - 0x1e6bff: VD5
1574 		0x1e6c00 - 0x1e6cff: reserved
1575 		0x1e6d00 - 0x1e6dff: VD5
1576 		0x1e6e00 - 0x1e7fff: reserved */
1577 	GEN_FW_RANGE(0x1e8000, 0x1effff, FORCEWAKE_MEDIA_VEBOX2), /*
1578 		0x1e8000 - 0x1ea0ff: VE2
1579 		0x1ea100 - 0x1effff: reserved */
1580 	GEN_FW_RANGE(0x1f0000, 0x1f3fff, FORCEWAKE_MEDIA_VDBOX6), /*
1581 		0x1f0000 - 0x1f2bff: VD6
1582 		0x1f2c00 - 0x1f2cff: reserved
1583 		0x1f2d00 - 0x1f2dff: VD6
1584 		0x1f2e00 - 0x1f3eff: reserved
1585 		0x1f3f00 - 0x1f3fff: VD6 */
1586 	GEN_FW_RANGE(0x1f4000, 0x1f7fff, FORCEWAKE_MEDIA_VDBOX7), /*
1587 		0x1f4000 - 0x1f6bff: VD7
1588 		0x1f6c00 - 0x1f6cff: reserved
1589 		0x1f6d00 - 0x1f6dff: VD7
1590 		0x1f6e00 - 0x1f7fff: reserved */
1591 	GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3),
1592 };
1593 
1594 static const struct intel_forcewake_range __mtl_fw_ranges[] = {
1595 	GEN_FW_RANGE(0x0, 0xaff, 0),
1596 	GEN_FW_RANGE(0xb00, 0xbff, FORCEWAKE_GT),
1597 	GEN_FW_RANGE(0xc00, 0xfff, 0),
1598 	GEN_FW_RANGE(0x1000, 0x1fff, FORCEWAKE_GT),
1599 	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1600 	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
1601 	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1602 	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
1603 		0x4000 - 0x48ff: render
1604 		0x4900 - 0x51ff: reserved */
1605 	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
1606 		0x5200 - 0x53ff: render
1607 		0x5400 - 0x54ff: reserved
1608 		0x5500 - 0x7fff: render */
1609 	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
1610 	GEN_FW_RANGE(0x8140, 0x817f, FORCEWAKE_RENDER), /*
1611 		0x8140 - 0x815f: render
1612 		0x8160 - 0x817f: reserved */
1613 	GEN_FW_RANGE(0x8180, 0x81ff, 0),
1614 	GEN_FW_RANGE(0x8200, 0x94cf, FORCEWAKE_GT), /*
1615 		0x8200 - 0x87ff: gt
1616 		0x8800 - 0x8dff: reserved
1617 		0x8e00 - 0x8f7f: gt
1618 		0x8f80 - 0x8fff: reserved
1619 		0x9000 - 0x947f: gt
1620 		0x9480 - 0x94cf: reserved */
1621 	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1622 	GEN_FW_RANGE(0x9560, 0x967f, 0), /*
1623 		0x9560 - 0x95ff: always on
1624 		0x9600 - 0x967f: reserved */
1625 	GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /*
1626 		0x9680 - 0x96ff: render
1627 		0x9700 - 0x97ff: reserved */
1628 	GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*
1629 		0x9800 - 0xb4ff: gt
1630 		0xb500 - 0xbfff: reserved
1631 		0xc000 - 0xcfff: gt */
1632 	GEN_FW_RANGE(0xd000, 0xd7ff, 0), /*
1633 		0xd000 - 0xd3ff: always on
1634 		0xd400 - 0xd7ff: reserved */
1635 	GEN_FW_RANGE(0xd800, 0xd87f, FORCEWAKE_RENDER),
1636 	GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT),
1637 	GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
1638 	GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*
1639 		0xdd00 - 0xddff: gt
1640 		0xde00 - 0xde7f: reserved */
1641 	GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*
1642 		0xde80 - 0xdfff: render
1643 		0xe000 - 0xe0ff: reserved
1644 		0xe100 - 0xe8ff: render */
1645 	GEN_FW_RANGE(0xe900, 0xe9ff, FORCEWAKE_GT),
1646 	GEN_FW_RANGE(0xea00, 0x147ff, 0), /*
1647 		 0xea00 - 0x11fff: reserved
1648 		0x12000 - 0x127ff: always on
1649 		0x12800 - 0x147ff: reserved */
1650 	GEN_FW_RANGE(0x14800, 0x19fff, FORCEWAKE_GT), /*
1651 		0x14800 - 0x153ff: gt
1652 		0x15400 - 0x19fff: reserved */
1653 	GEN_FW_RANGE(0x1a000, 0x21fff, FORCEWAKE_RENDER), /*
1654 		0x1a000 - 0x1bfff: render
1655 		0x1c000 - 0x21fff: reserved */
1656 	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
1657 	GEN_FW_RANGE(0x24000, 0x2ffff, 0), /*
1658 		0x24000 - 0x2407f: always on
1659 		0x24080 - 0x2ffff: reserved */
1660 	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
1661 	GEN_FW_RANGE(0x40000, 0x1901ef, 0),
1662 	GEN_FW_RANGE(0x1901f0, 0x1901f3, FORCEWAKE_GT)
1663 		/* FIXME: WA to wake GT while triggering H2G */
1664 };
1665 
1666 /*
1667  * Note that the register ranges here are the final offsets after
1668  * translation of the GSI block to the 0x380000 offset.
1669  *
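 *
 * For example, a GT range that sits at 0xb00 - 0xbff within the GSI block
 * appears in this table at 0x380b00 - 0x380bff after adding the 0x380000
 * offset (compare the 0xb00 - 0xbff FORCEWAKE_GT entry in __mtl_fw_ranges
 * above).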
1670  * NOTE:  There are a couple MCR ranges near the bottom of this table
1671  * that need to power up either VD0 or VD2 depending on which replicated
1672  * instance of the register we're trying to access.  Our forcewake logic
1673  * at the moment doesn't have a good way to take steering into consideration,
1674  * and the driver doesn't even access any registers in those ranges today,
1675  * so for now we just mark those ranges as FORCEWAKE_ALL.  That will ensure
1676  * proper operation if we do start using the ranges in the future, and we
1677  * can determine at that time whether it's worth adding extra complexity to
1678  * the forcewake handling to take steering into consideration.
1679  */
1680 static const struct intel_forcewake_range __xelpmp_fw_ranges[] = {
1681 	GEN_FW_RANGE(0x0, 0x115fff, 0), /* render GT range */
1682 	GEN_FW_RANGE(0x116000, 0x11ffff, FORCEWAKE_GSC), /*
1683 		0x116000 - 0x117fff: gsc
1684 		0x118000 - 0x119fff: reserved
1685 		0x11a000 - 0x11efff: gsc
1686 		0x11f000 - 0x11ffff: reserved */
1687 	GEN_FW_RANGE(0x120000, 0x1bffff, 0), /* non-GT range */
1688 	GEN_FW_RANGE(0x1c0000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX0), /*
1689 		0x1c0000 - 0x1c3dff: VD0
1690 		0x1c3e00 - 0x1c3eff: reserved
1691 		0x1c3f00 - 0x1c3fff: VD0
1692 		0x1c4000 - 0x1c7fff: reserved */
1693 	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
1694 		0x1c8000 - 0x1ca0ff: VE0
1695 		0x1ca100 - 0x1cbfff: reserved */
1696 	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
1697 		0x1cc000 - 0x1cdfff: VD0
1698 		0x1ce000 - 0x1cffff: reserved */
1699 	GEN_FW_RANGE(0x1d0000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX2), /*
1700 		0x1d0000 - 0x1d3dff: VD2
1701 		0x1d3e00 - 0x1d3eff: reserved
1702 		0x1d4000 - 0x1d7fff: VD2 */
1703 	GEN_FW_RANGE(0x1d8000, 0x1da0ff, FORCEWAKE_MEDIA_VEBOX1),
1704 	GEN_FW_RANGE(0x1da100, 0x380aff, 0), /*
1705 		0x1da100 - 0x23ffff: reserved
1706 		0x240000 - 0x37ffff: non-GT range
1707 		0x380000 - 0x380aff: reserved */
1708 	GEN_FW_RANGE(0x380b00, 0x380bff, FORCEWAKE_GT),
1709 	GEN_FW_RANGE(0x380c00, 0x380fff, 0),
1710 	GEN_FW_RANGE(0x381000, 0x38817f, FORCEWAKE_GT), /*
1711 		0x381000 - 0x381fff: gt
1712 		0x382000 - 0x383fff: reserved
1713 		0x384000 - 0x384aff: gt
1714 		0x384b00 - 0x3851ff: reserved
1715 		0x385200 - 0x3871ff: gt
1716 		0x387200 - 0x387fff: reserved
1717 		0x388000 - 0x38813f: gt
1718 		0x388140 - 0x38817f: reserved */
1719 	GEN_FW_RANGE(0x388180, 0x3882ff, 0), /*
1720 		0x388180 - 0x3881ff: always on
1721 		0x388200 - 0x3882ff: reserved */
1722 	GEN_FW_RANGE(0x388300, 0x38955f, FORCEWAKE_GT), /*
1723 		0x388300 - 0x38887f: gt
1724 		0x388880 - 0x388fff: reserved
1725 		0x389000 - 0x38947f: gt
1726 		0x389480 - 0x38955f: reserved */
1727 	GEN_FW_RANGE(0x389560, 0x389fff, 0), /*
1728 		0x389560 - 0x3895ff: always on
1729 		0x389600 - 0x389fff: reserved */
1730 	GEN_FW_RANGE(0x38a000, 0x38cfff, FORCEWAKE_GT), /*
1731 		0x38a000 - 0x38afff: gt
1732 		0x38b000 - 0x38bfff: reserved
1733 		0x38c000 - 0x38cfff: gt */
1734 	GEN_FW_RANGE(0x38d000, 0x38d11f, 0),
1735 	GEN_FW_RANGE(0x38d120, 0x391fff, FORCEWAKE_GT), /*
1736 		0x38d120 - 0x38dfff: gt
1737 		0x38e000 - 0x38efff: reserved
1738 		0x38f000 - 0x38ffff: gt
1739 		0x390000 - 0x391fff: reserved */
1740 	GEN_FW_RANGE(0x392000, 0x392fff, 0), /*
1741 		0x392000 - 0x3927ff: always on
1742 		0x392800 - 0x392fff: reserved */
1743 	GEN_FW_RANGE(0x393000, 0x3931ff, FORCEWAKE_GT),
1744 	GEN_FW_RANGE(0x393200, 0x39323f, FORCEWAKE_ALL), /* instance-based, see note above */
1745 	GEN_FW_RANGE(0x393240, 0x3933ff, FORCEWAKE_GT),
1746 	GEN_FW_RANGE(0x393400, 0x3934ff, FORCEWAKE_ALL), /* instance-based, see note above */
1747 	GEN_FW_RANGE(0x393500, 0x393c7f, 0), /*
1748 		0x393500 - 0x393bff: reserved
1749 		0x393c00 - 0x393c7f: always on */
1750 	GEN_FW_RANGE(0x393c80, 0x393dff, FORCEWAKE_GT),
1751 };
1752 
1753 static void
1754 ilk_dummy_write(struct intel_uncore *uncore)
1755 {
1756 	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
1757 	 * the chip from rc6 before touching it for real. MI_MODE is masked,
1758 	 * hence harmless to write 0 into. */
1759 	__raw_uncore_write32(uncore, RING_MI_MODE(RENDER_RING_BASE), 0);
1760 }
1761 
1762 static void
1763 __unclaimed_reg_debug(struct intel_uncore *uncore,
1764 		      const i915_reg_t reg,
1765 		      const bool read)
1766 {
1767 	if (drm_WARN(&uncore->i915->drm,
1768 		     check_for_unclaimed_mmio(uncore),
1769 		     "Unclaimed %s register 0x%x\n",
1770 		     read ? "read from" : "write to",
1771 		     i915_mmio_reg_offset(reg)))
1772 		/* Only report the first N failures */
1773 		uncore->i915->params.mmio_debug--;
1774 }
1775 
1776 static void
1777 __unclaimed_previous_reg_debug(struct intel_uncore *uncore,
1778 			       const i915_reg_t reg,
1779 			       const bool read)
1780 {
1781 	if (check_for_unclaimed_mmio(uncore))
1782 		drm_dbg(&uncore->i915->drm,
1783 			"Unclaimed access detected before %s register 0x%x\n",
1784 			read ? "read from" : "write to",
1785 			i915_mmio_reg_offset(reg));
1786 }
1787 
1788 static inline bool __must_check
1789 unclaimed_reg_debug_header(struct intel_uncore *uncore,
1790 			   const i915_reg_t reg, const bool read)
1791 {
1792 	if (likely(!uncore->i915->params.mmio_debug) || !uncore->debug)
1793 		return false;
1794 
1795 	/* interrupts are disabled and re-enabled around uncore->lock usage */
1796 	lockdep_assert_held(&uncore->lock);
1797 
1798 	spin_lock(&uncore->debug->lock);
1799 	__unclaimed_previous_reg_debug(uncore, reg, read);
1800 
1801 	return true;
1802 }
1803 
1804 static inline void
1805 unclaimed_reg_debug_footer(struct intel_uncore *uncore,
1806 			   const i915_reg_t reg, const bool read)
1807 {
1808 	/* interrupts are disabled and re-enabled around uncore->lock usage */
1809 	lockdep_assert_held(&uncore->lock);
1810 
1811 	__unclaimed_reg_debug(uncore, reg, read);
1812 	spin_unlock(&uncore->debug->lock);
1813 }
1814 
1815 #define __vgpu_read(x) \
1816 static u##x \
1817 vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1818 	u##x val = __raw_uncore_read##x(uncore, reg); \
1819 	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1820 	return val; \
1821 }
1822 __vgpu_read(8)
1823 __vgpu_read(16)
1824 __vgpu_read(32)
1825 __vgpu_read(64)
1826 
1827 #define GEN2_READ_HEADER(x) \
1828 	u##x val = 0; \
1829 	assert_rpm_wakelock_held(uncore->rpm);
1830 
1831 #define GEN2_READ_FOOTER \
1832 	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1833 	return val
1834 
1835 #define __gen2_read(x) \
1836 static u##x \
1837 gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1838 	GEN2_READ_HEADER(x); \
1839 	val = __raw_uncore_read##x(uncore, reg); \
1840 	GEN2_READ_FOOTER; \
1841 }
1842 
1843 #define __gen5_read(x) \
1844 static u##x \
1845 gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1846 	GEN2_READ_HEADER(x); \
1847 	ilk_dummy_write(uncore); \
1848 	val = __raw_uncore_read##x(uncore, reg); \
1849 	GEN2_READ_FOOTER; \
1850 }
1851 
1852 __gen5_read(8)
1853 __gen5_read(16)
1854 __gen5_read(32)
1855 __gen5_read(64)
1856 __gen2_read(8)
1857 __gen2_read(16)
1858 __gen2_read(32)
1859 __gen2_read(64)
1860 
1861 #undef __gen5_read
1862 #undef __gen2_read
1863 
1864 #undef GEN2_READ_FOOTER
1865 #undef GEN2_READ_HEADER
1866 
1867 #define GEN6_READ_HEADER(x) \
1868 	u32 offset = i915_mmio_reg_offset(reg); \
1869 	unsigned long irqflags; \
1870 	bool unclaimed_reg_debug; \
1871 	u##x val = 0; \
1872 	assert_rpm_wakelock_held(uncore->rpm); \
1873 	spin_lock_irqsave(&uncore->lock, irqflags); \
1874 	unclaimed_reg_debug = unclaimed_reg_debug_header(uncore, reg, true)
1875 
1876 #define GEN6_READ_FOOTER \
1877 	if (unclaimed_reg_debug) \
1878 		unclaimed_reg_debug_footer(uncore, reg, true);	\
1879 	spin_unlock_irqrestore(&uncore->lock, irqflags); \
1880 	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1881 	return val
1882 
1883 static noinline void ___force_wake_auto(struct intel_uncore *uncore,
1884 					enum forcewake_domains fw_domains)
1885 {
1886 	struct intel_uncore_forcewake_domain *domain;
1887 	unsigned int tmp;
1888 
1889 	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
1890 
1891 	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
1892 		fw_domain_arm_timer(domain);
1893 
1894 	fw_domains_get(uncore, fw_domains);
1895 }
1896 
1897 static inline void __force_wake_auto(struct intel_uncore *uncore,
1898 				     enum forcewake_domains fw_domains)
1899 {
1900 	GEM_BUG_ON(!fw_domains);
1901 
1902 	/* Turn on all requested but inactive supported forcewake domains. */
1903 	fw_domains &= uncore->fw_domains;
1904 	fw_domains &= ~uncore->fw_domains_active;
1905 
1906 	if (fw_domains)
1907 		___force_wake_auto(uncore, fw_domains);
1908 }
1909 
1910 #define __gen_fwtable_read(x) \
1911 static u##x \
1912 fwtable_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) \
1913 { \
1914 	enum forcewake_domains fw_engine; \
1915 	GEN6_READ_HEADER(x); \
1916 	fw_engine = __fwtable_reg_read_fw_domains(uncore, offset); \
1917 	if (fw_engine) \
1918 		__force_wake_auto(uncore, fw_engine); \
1919 	val = __raw_uncore_read##x(uncore, reg); \
1920 	GEN6_READ_FOOTER; \
1921 }
1922 
1923 static enum forcewake_domains
1924 fwtable_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) {
1925 	return __fwtable_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg));
1926 }
1927 
1928 __gen_fwtable_read(8)
1929 __gen_fwtable_read(16)
1930 __gen_fwtable_read(32)
1931 __gen_fwtable_read(64)
1932 
1933 #undef __gen_fwtable_read
1934 #undef GEN6_READ_FOOTER
1935 #undef GEN6_READ_HEADER
1936 
1937 #define GEN2_WRITE_HEADER \
1938 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1939 	assert_rpm_wakelock_held(uncore->rpm); \
1940 
1941 #define GEN2_WRITE_FOOTER
1942 
1943 #define __gen2_write(x) \
1944 static void \
1945 gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1946 	GEN2_WRITE_HEADER; \
1947 	__raw_uncore_write##x(uncore, reg, val); \
1948 	GEN2_WRITE_FOOTER; \
1949 }
1950 
1951 #define __gen5_write(x) \
1952 static void \
1953 gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1954 	GEN2_WRITE_HEADER; \
1955 	ilk_dummy_write(uncore); \
1956 	__raw_uncore_write##x(uncore, reg, val); \
1957 	GEN2_WRITE_FOOTER; \
1958 }
1959 
1960 __gen5_write(8)
1961 __gen5_write(16)
1962 __gen5_write(32)
1963 __gen2_write(8)
1964 __gen2_write(16)
1965 __gen2_write(32)
1966 
1967 #undef __gen5_write
1968 #undef __gen2_write
1969 
1970 #undef GEN2_WRITE_FOOTER
1971 #undef GEN2_WRITE_HEADER
1972 
1973 #define GEN6_WRITE_HEADER \
1974 	u32 offset = i915_mmio_reg_offset(reg); \
1975 	unsigned long irqflags; \
1976 	bool unclaimed_reg_debug; \
1977 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1978 	assert_rpm_wakelock_held(uncore->rpm); \
1979 	spin_lock_irqsave(&uncore->lock, irqflags); \
1980 	unclaimed_reg_debug = unclaimed_reg_debug_header(uncore, reg, false)
1981 
1982 #define GEN6_WRITE_FOOTER \
1983 	if (unclaimed_reg_debug) \
1984 		unclaimed_reg_debug_footer(uncore, reg, false); \
1985 	spin_unlock_irqrestore(&uncore->lock, irqflags)
1986 
1987 #define __gen6_write(x) \
1988 static void \
1989 gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1990 	GEN6_WRITE_HEADER; \
1991 	if (NEEDS_FORCE_WAKE(offset)) \
1992 		__gen6_gt_wait_for_fifo(uncore); \
1993 	__raw_uncore_write##x(uncore, reg, val); \
1994 	GEN6_WRITE_FOOTER; \
1995 }
1996 __gen6_write(8)
1997 __gen6_write(16)
1998 __gen6_write(32)
1999 
2000 #define __gen_fwtable_write(x) \
2001 static void \
2002 fwtable_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
2003 	enum forcewake_domains fw_engine; \
2004 	GEN6_WRITE_HEADER; \
2005 	fw_engine = __fwtable_reg_write_fw_domains(uncore, offset); \
2006 	if (fw_engine) \
2007 		__force_wake_auto(uncore, fw_engine); \
2008 	__raw_uncore_write##x(uncore, reg, val); \
2009 	GEN6_WRITE_FOOTER; \
2010 }
2011 
2012 static enum forcewake_domains
2013 fwtable_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
2014 {
2015 	return __fwtable_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg));
2016 }
2017 
2018 __gen_fwtable_write(8)
2019 __gen_fwtable_write(16)
2020 __gen_fwtable_write(32)
2021 
2022 #undef __gen_fwtable_write
2023 #undef GEN6_WRITE_FOOTER
2024 #undef GEN6_WRITE_HEADER
2025 
2026 #define __vgpu_write(x) \
2027 static void \
2028 vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
2029 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
2030 	__raw_uncore_write##x(uncore, reg, val); \
2031 }
2032 __vgpu_write(8)
2033 __vgpu_write(16)
2034 __vgpu_write(32)
2035 
2036 #define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
2037 do { \
2038 	(uncore)->funcs.mmio_writeb = x##_write8; \
2039 	(uncore)->funcs.mmio_writew = x##_write16; \
2040 	(uncore)->funcs.mmio_writel = x##_write32; \
2041 } while (0)
2042 
2043 #define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
2044 do { \
2045 	(uncore)->funcs.mmio_readb = x##_read8; \
2046 	(uncore)->funcs.mmio_readw = x##_read16; \
2047 	(uncore)->funcs.mmio_readl = x##_read32; \
2048 	(uncore)->funcs.mmio_readq = x##_read64; \
2049 } while (0)
2050 
2051 #define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
2052 do { \
2053 	ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
2054 	(uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
2055 } while (0)
2056 
2057 #define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
2058 do { \
2059 	ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
2060 	(uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
2061 } while (0)
2062 
2063 static int __fw_domain_init(struct intel_uncore *uncore,
2064 			    enum forcewake_domain_id domain_id,
2065 			    i915_reg_t reg_set,
2066 			    i915_reg_t reg_ack)
2067 {
2068 	struct intel_uncore_forcewake_domain *d;
2069 
2070 	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
2071 	GEM_BUG_ON(uncore->fw_domain[domain_id]);
2072 
2073 	if (i915_inject_probe_failure(uncore->i915))
2074 		return -ENOMEM;
2075 
2076 	d = kzalloc(sizeof(*d), GFP_KERNEL);
2077 	if (!d)
2078 		return -ENOMEM;
2079 
2080 	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set));
2081 	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack));
2082 
2083 	d->uncore = uncore;
2084 	d->wake_count = 0;
2085 	d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set) + uncore->gsi_offset;
2086 	d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack) + uncore->gsi_offset;
2087 
2088 	d->id = domain_id;
2089 
2090 	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
2091 	BUILD_BUG_ON(FORCEWAKE_GT != (1 << FW_DOMAIN_ID_GT));
2092 	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
2093 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
2094 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
2095 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
2096 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
2097 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX4 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX4));
2098 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX5 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX5));
2099 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX6 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX6));
2100 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX7 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX7));
2101 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
2102 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
2103 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX2));
2104 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX3));
2105 	BUILD_BUG_ON(FORCEWAKE_GSC != (1 << FW_DOMAIN_ID_GSC));
2106 
2107 	d->mask = BIT(domain_id);
2108 
2109 	hrtimer_setup(&d->timer, intel_uncore_fw_release_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2110 
2111 	uncore->fw_domains |= BIT(domain_id);
2112 
2113 	fw_domain_reset(d);
2114 
2115 	uncore->fw_domain[domain_id] = d;
2116 
2117 	return 0;
2118 }
2119 
2120 static void fw_domain_fini(struct intel_uncore *uncore,
2121 			   enum forcewake_domain_id domain_id)
2122 {
2123 	struct intel_uncore_forcewake_domain *d;
2124 
2125 	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
2126 
2127 	d = fetch_and_zero(&uncore->fw_domain[domain_id]);
2128 	if (!d)
2129 		return;
2130 
2131 	uncore->fw_domains &= ~BIT(domain_id);
2132 	drm_WARN_ON(&uncore->i915->drm, d->wake_count);
2133 	drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer));
2134 	kfree(d);
2135 }
2136 
2137 static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
2138 {
2139 	struct intel_uncore_forcewake_domain *d;
2140 	int tmp;
2141 
2142 	for_each_fw_domain(d, uncore, tmp)
2143 		fw_domain_fini(uncore, d->id);
2144 }
2145 
2146 static const struct intel_uncore_fw_get uncore_get_fallback = {
2147 	.force_wake_get = fw_domains_get_with_fallback
2148 };
2149 
2150 static const struct intel_uncore_fw_get uncore_get_normal = {
2151 	.force_wake_get = fw_domains_get_normal,
2152 };
2153 
2154 static const struct intel_uncore_fw_get uncore_get_thread_status = {
2155 	.force_wake_get = fw_domains_get_with_thread_status
2156 };
2157 
2158 static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
2159 {
2160 	struct drm_i915_private *i915 = uncore->i915;
2161 	int ret = 0;
2162 
2163 	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
2164 
2165 #define fw_domain_init(uncore__, id__, set__, ack__) \
2166 	(ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))
2167 
2168 	if (GRAPHICS_VER(i915) >= 11) {
2169 		intel_engine_mask_t emask;
2170 		int i;
2171 
2172 		/* we'll prune the domains of missing engines later */
2173 		emask = uncore->gt->info.engine_mask;
2174 
2175 		uncore->fw_get_funcs = &uncore_get_fallback;
2176 		if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
2177 			fw_domain_init(uncore, FW_DOMAIN_ID_GT,
2178 				       FORCEWAKE_GT_GEN9,
2179 				       FORCEWAKE_ACK_GT_MTL);
2180 		else
2181 			fw_domain_init(uncore, FW_DOMAIN_ID_GT,
2182 				       FORCEWAKE_GT_GEN9,
2183 				       FORCEWAKE_ACK_GT_GEN9);
2184 
2185 		if (RCS_MASK(uncore->gt) || CCS_MASK(uncore->gt))
2186 			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2187 				       FORCEWAKE_RENDER_GEN9,
2188 				       FORCEWAKE_ACK_RENDER_GEN9);
2189 
2190 		for (i = 0; i < I915_MAX_VCS; i++) {
2191 			if (!__HAS_ENGINE(emask, _VCS(i)))
2192 				continue;
2193 
2194 			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
2195 				       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
2196 				       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
2197 		}
2198 		for (i = 0; i < I915_MAX_VECS; i++) {
2199 			if (!__HAS_ENGINE(emask, _VECS(i)))
2200 				continue;
2201 
2202 			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
2203 				       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
2204 				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
2205 		}
2206 
2207 		if (uncore->gt->type == GT_MEDIA)
2208 			fw_domain_init(uncore, FW_DOMAIN_ID_GSC,
2209 				       FORCEWAKE_REQ_GSC, FORCEWAKE_ACK_GSC);
2210 	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
2211 		uncore->fw_get_funcs = &uncore_get_fallback;
2212 		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2213 			       FORCEWAKE_RENDER_GEN9,
2214 			       FORCEWAKE_ACK_RENDER_GEN9);
2215 		fw_domain_init(uncore, FW_DOMAIN_ID_GT,
2216 			       FORCEWAKE_GT_GEN9,
2217 			       FORCEWAKE_ACK_GT_GEN9);
2218 		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
2219 			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
2220 	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
2221 		uncore->fw_get_funcs = &uncore_get_normal;
2222 		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2223 			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
2224 		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
2225 			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
2226 	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
2227 		uncore->fw_get_funcs = &uncore_get_thread_status;
2228 		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2229 			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
2230 	} else if (IS_IVYBRIDGE(i915)) {
2231 		u32 ecobus;
2232 
2233 		/* IVB configs may use multi-threaded forcewake */
2234 
2235 		/* A small trick here - if the bios hasn't configured
2236 		 * MT forcewake, and if the device is in RC6, then
2237 		 * force_wake_mt_get will not wake the device and the
2238 		 * ECOBUS read will return zero, which will be
2239 		 * (correctly) interpreted by the test below as MT
2240 		 * forcewake being disabled.
2241 		 */
2242 		uncore->fw_get_funcs = &uncore_get_thread_status;
2243 
2244 		/* We need to init first for ECOBUS access and then
2245 		 * determine later if we want to reinit, in case MT access is
2246 		 * not working. At this stage we don't know which flavour this
2247 		 * ivb is, so it is better to also reset the gen6 fw registers
2248 		 * before the ecobus check.
2249 		 */
2250 
2251 		__raw_uncore_write32(uncore, FORCEWAKE, 0);
2252 		__raw_posting_read(uncore, ECOBUS);
2253 
2254 		ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2255 				       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
2256 		if (ret)
2257 			goto out;
2258 
2259 		spin_lock_irq(&uncore->lock);
2260 		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
2261 		ecobus = __raw_uncore_read32(uncore, ECOBUS);
2262 		fw_domains_put(uncore, FORCEWAKE_RENDER);
2263 		spin_unlock_irq(&uncore->lock);
2264 
2265 		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
2266 			drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n");
2267 			drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n");
2268 			fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
2269 			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2270 				       FORCEWAKE, FORCEWAKE_ACK);
2271 		}
2272 	} else if (GRAPHICS_VER(i915) == 6) {
2273 		uncore->fw_get_funcs = &uncore_get_thread_status;
2274 		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2275 			       FORCEWAKE, FORCEWAKE_ACK);
2276 	}
2277 
2278 #undef fw_domain_init
2279 
2280 	/* All future platforms are expected to require complex power gating */
2281 	drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0);
2282 
2283 out:
2284 	if (ret)
2285 		intel_uncore_fw_domains_fini(uncore);
2286 
2287 	return ret;
2288 }
2289 
2290 #define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
2291 { \
2292 	(uncore)->fw_domains_table = \
2293 			(struct intel_forcewake_range *)(d); \
2294 	(uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
2295 }
2296 
2297 #define ASSIGN_SHADOW_TABLE(uncore, d) \
2298 { \
2299 	(uncore)->shadowed_reg_table = d; \
2300 	(uncore)->shadowed_reg_table_entries = ARRAY_SIZE((d)); \
2301 }
2302 
2303 static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
2304 					 unsigned long action, void *data)
2305 {
2306 	struct intel_uncore *uncore = container_of(nb,
2307 			struct intel_uncore, pmic_bus_access_nb);
2308 
2309 	switch (action) {
2310 	case MBI_PMIC_BUS_ACCESS_BEGIN:
2311 		/*
2312 		 * forcewake all now to make sure that we don't need to do a
2313 		 * forcewake later which on systems where this notifier gets
2314 		 * called requires the punit to access the shared pmic i2c
2315 		 * bus, which will be busy after this notification, leading to:
2316 		 * "render: timed out waiting for forcewake ack request."
2317 		 * errors.
2318 		 *
2319 		 * The notifier is unregistered during intel_runtime_suspend(),
2320 		 * so it's ok to access the HW here without holding a RPM
2321 		 * wake reference -> disable wakeref asserts for the time of
2322 		 * the access.
2323 		 */
2324 		disable_rpm_wakeref_asserts(uncore->rpm);
2325 		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
2326 		enable_rpm_wakeref_asserts(uncore->rpm);
2327 		break;
2328 	case MBI_PMIC_BUS_ACCESS_END:
2329 		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
2330 		break;
2331 	}
2332 
2333 	return NOTIFY_OK;
2334 }
2335 
2336 static void uncore_unmap_mmio(struct drm_device *drm, void *regs)
2337 {
2338 	iounmap((void __iomem *)regs);
2339 }
2340 
2341 int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
2342 {
2343 	struct drm_i915_private *i915 = uncore->i915;
2344 	int mmio_size;
2345 
2346 	/*
2347 	 * Before gen4, the registers and the GTT are behind different BARs.
2348 	 * However, from gen4 onwards, the registers and the GTT are shared
2349 	 * in the same BAR, so we want to restrict this ioremap from
2350 	 * clobbering the GTT, for which we want ioremap_wc instead. Fortunately,
2351 	 * the register BAR remains the same size for all the earlier
2352 	 * generations up to Ironlake.
2353 	 * For dgfx chips the register range is expanded to 4MB, and this larger
2354 	 * range is also used for integrated gpus beginning with Meteor Lake.
2355 	 */
2356 	if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
2357 		mmio_size = 4 * 1024 * 1024;
2358 	else if (GRAPHICS_VER(i915) >= 5)
2359 		mmio_size = 2 * 1024 * 1024;
2360 	else
2361 		mmio_size = 512 * 1024;
2362 
2363 	uncore->regs = ioremap(phys_addr, mmio_size);
2364 	if (uncore->regs == NULL) {
2365 		drm_err(&i915->drm, "failed to map registers\n");
2366 		return -EIO;
2367 	}
2368 
2369 	return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio,
2370 					(void __force *)uncore->regs);
2371 }
2372 
2373 void intel_uncore_init_early(struct intel_uncore *uncore,
2374 			     struct intel_gt *gt)
2375 {
2376 	spin_lock_init(&uncore->lock);
2377 	uncore->i915 = gt->i915;
2378 	uncore->gt = gt;
2379 	uncore->rpm = &gt->i915->runtime_pm;
2380 }
2381 
2382 static void uncore_raw_init(struct intel_uncore *uncore)
2383 {
2384 	GEM_BUG_ON(intel_uncore_has_forcewake(uncore));
2385 
2386 	if (intel_vgpu_active(uncore->i915)) {
2387 		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
2388 		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
2389 	} else if (GRAPHICS_VER(uncore->i915) == 5) {
2390 		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
2391 		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
2392 	} else {
2393 		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
2394 		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
2395 	}
2396 }
2397 
2398 static int uncore_media_forcewake_init(struct intel_uncore *uncore)
2399 {
2400 	struct drm_i915_private *i915 = uncore->i915;
2401 
2402 	if (MEDIA_VER(i915) >= 13) {
2403 		ASSIGN_FW_DOMAINS_TABLE(uncore, __xelpmp_fw_ranges);
2404 		ASSIGN_SHADOW_TABLE(uncore, xelpmp_shadowed_regs);
2405 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2406 	} else {
2407 		MISSING_CASE(MEDIA_VER(i915));
2408 		return -ENODEV;
2409 	}
2410 
2411 	return 0;
2412 }
2413 
2414 static int uncore_forcewake_init(struct intel_uncore *uncore)
2415 {
2416 	struct drm_i915_private *i915 = uncore->i915;
2417 	int ret;
2418 
2419 	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
2420 
2421 	ret = intel_uncore_fw_domains_init(uncore);
2422 	if (ret)
2423 		return ret;
2424 	forcewake_early_sanitize(uncore, 0);
2425 
2426 	ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
2427 
2428 	if (uncore->gt->type == GT_MEDIA)
2429 		return uncore_media_forcewake_init(uncore);
2430 
2431 	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) {
2432 		ASSIGN_FW_DOMAINS_TABLE(uncore, __mtl_fw_ranges);
2433 		ASSIGN_SHADOW_TABLE(uncore, mtl_shadowed_regs);
2434 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2435 	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
2436 		ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges);
2437 		ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs);
2438 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2439 	} else if (GRAPHICS_VER(i915) >= 12) {
2440 		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
2441 		ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
2442 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2443 	} else if (GRAPHICS_VER(i915) == 11) {
2444 		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
2445 		ASSIGN_SHADOW_TABLE(uncore, gen11_shadowed_regs);
2446 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2447 	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
2448 		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
2449 		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
2450 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2451 	} else if (IS_CHERRYVIEW(i915)) {
2452 		ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
2453 		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
2454 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2455 	} else if (GRAPHICS_VER(i915) == 8) {
2456 		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
2457 		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
2458 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2459 	} else if (IS_VALLEYVIEW(i915)) {
2460 		ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
2461 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
2462 	} else if (IS_GRAPHICS_VER(i915, 6, 7)) {
2463 		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
2464 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
2465 	}
2466 
2467 	uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
2468 	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
2469 
2470 	return 0;
2471 }
2472 
2473 static int sanity_check_mmio_access(struct intel_uncore *uncore)
2474 {
2475 	struct drm_i915_private *i915 = uncore->i915;
2476 
2477 	if (GRAPHICS_VER(i915) < 8)
2478 		return 0;
2479 
2480 	/*
2481 	 * Sanity-check that MMIO access to the device is working properly.  If
2482 	 * the CPU is unable to communicate with a PCI device, BAR reads will
2483 	 * return 0xFFFFFFFF.  Let's make sure the device isn't in this state
2484 	 * before we start trying to access registers.
2485 	 *
2486 	 * We use the primary GT's forcewake register as our guinea pig since
2487 	 * it's been around since HSW and it's a masked register so the upper
2488 	 * 16 bits can never read back as 1's if device access is operating
2489 	 * properly.
2490 	 *
2491 	 * If MMIO isn't working, we'll wait up to 2 seconds to see if it
2492 	 * recovers, then give up.
2493 	 */
2494 #define COND (__raw_uncore_read32(uncore, FORCEWAKE_MT) != ~0)
2495 	if (wait_for(COND, 2000) == -ETIMEDOUT) {
2496 		drm_err(&i915->drm, "Device is non-operational; MMIO access returns 0xFFFFFFFF!\n");
2497 		return -EIO;
2498 	}
2499 
2500 	return 0;
2501 }
2502 
2503 int intel_uncore_init_mmio(struct intel_uncore *uncore)
2504 {
2505 	struct drm_i915_private *i915 = uncore->i915;
2506 	struct intel_display *display = i915->display;
2507 	int ret;
2508 
2509 	ret = sanity_check_mmio_access(uncore);
2510 	if (ret)
2511 		return ret;
2512 
2513 	/*
2514 	 * The boot firmware initializes local memory and assesses its health.
2515 	 * If memory training fails, the punit will have been instructed to
2516 	 * keep the GT powered down; we won't be able to communicate with it
2517 	 * and we should not continue with driver initialization.
2518 	 */
2519 	if (IS_DGFX(i915) &&
2520 	    !(__raw_uncore_read32(uncore, GU_CNTL) & LMEM_INIT)) {
2521 		drm_err(&i915->drm, "LMEM not initialized by firmware\n");
2522 		return -ENODEV;
2523 	}
2524 
2525 	if (GRAPHICS_VER(i915) > 5 && !intel_vgpu_active(i915))
2526 		uncore->flags |= UNCORE_HAS_FORCEWAKE;
2527 
2528 	if (!intel_uncore_has_forcewake(uncore)) {
2529 		uncore_raw_init(uncore);
2530 	} else {
2531 		ret = uncore_forcewake_init(uncore);
2532 		if (ret)
2533 			return ret;
2534 	}
2535 
2536 	/* make sure fw funcs are set if and only if we have fw */
2537 	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->fw_get_funcs);
2538 	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
2539 	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);
2540 
2541 	if (HAS_FPGA_DBG_UNCLAIMED(display))
2542 		uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;
2543 
2544 	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
2545 		uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;
2546 
2547 	if (IS_GRAPHICS_VER(i915, 6, 7))
2548 		uncore->flags |= UNCORE_HAS_FIFO;
2549 
2550 	/* clear out unclaimed reg detection bit */
2551 	if (intel_uncore_unclaimed_mmio(uncore))
2552 		drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");
2553 
2554 	return 0;
2555 }
2556 
2557 /*
2558  * We might have detected that some engines are fused off after we initialized
2559  * the forcewake domains. Prune them, to make sure they only reference existing
2560  * the forcewake domains. Prune them to make sure they only reference existing
2561  */
2562 void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
2563 					  struct intel_gt *gt)
2564 {
2565 	enum forcewake_domains fw_domains = uncore->fw_domains;
2566 	enum forcewake_domain_id domain_id;
2567 	int i;
2568 
2569 	if (!intel_uncore_has_forcewake(uncore) || GRAPHICS_VER(uncore->i915) < 11)
2570 		return;
2571 
2572 	for (i = 0; i < I915_MAX_VCS; i++) {
2573 		domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
2574 
2575 		if (HAS_ENGINE(gt, _VCS(i)))
2576 			continue;
2577 
2578 		/*
2579 		 * Starting with XeHP, the power well for an even-numbered
2580 		 * VDBOX is also used for shared units within the
2581 		 * media slice such as SFC.  So even if the engine
2582 		 * itself is fused off, we still need to initialize
2583 		 * the forcewake domain if any of the other engines
2584 		 * in the same media slice are present.
2585 		 */
2586 		if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 55) && i % 2 == 0) {
2587 			if ((i + 1 < I915_MAX_VCS) && HAS_ENGINE(gt, _VCS(i + 1)))
2588 				continue;
2589 
2590 			if (HAS_ENGINE(gt, _VECS(i / 2)))
2591 				continue;
2592 		}
2593 
2594 		if (fw_domains & BIT(domain_id))
2595 			fw_domain_fini(uncore, domain_id);
2596 	}
2597 
2598 	for (i = 0; i < I915_MAX_VECS; i++) {
2599 		domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
2600 
2601 		if (HAS_ENGINE(gt, _VECS(i)))
2602 			continue;
2603 
2604 		if (fw_domains & BIT(domain_id))
2605 			fw_domain_fini(uncore, domain_id);
2606 	}
2607 
2608 	if ((fw_domains & BIT(FW_DOMAIN_ID_GSC)) && !HAS_ENGINE(gt, GSC0))
2609 		fw_domain_fini(uncore, FW_DOMAIN_ID_GSC);
2610 }
2611 
2612 /*
2613  * The driver-initiated FLR is the highest level of reset that we can trigger
2614  * from within the driver. It is different from the PCI FLR in that it doesn't
2615  * fully reset the SGUnit and doesn't modify the PCI config space and therefore
2616  * it doesn't require a re-enumeration of the PCI BARs. However, the
2617  * driver-initiated FLR does still cause a reset of both GT and display and a
2618  * memory wipe of local and stolen memory, so recovery would require a full HW
2619  * re-init and saving/restoring (or re-populating) the wiped memory. Since we
2620  * perform the FLR as the very last action before releasing access to the HW
2621  * during the driver release flow, we don't attempt recovery at all, because
2622  * if/when a new instance of i915 is bound to the device it will do a full
2623  * re-init anyway.
2624  */
2625 static void driver_initiated_flr(struct intel_uncore *uncore)
2626 {
2627 	struct drm_i915_private *i915 = uncore->i915;
2628 	unsigned int flr_timeout_ms;
2629 	int ret;
2630 
2631 	drm_dbg(&i915->drm, "Triggering Driver-FLR\n");
2632 
2633 	/*
2634 	 * The specification recommends a 3 second FLR reset timeout. To be
2635 	 * cautious, we will extend this to 9 seconds, three times the specified
2636 	 * timeout.
2637 	 */
2638 	flr_timeout_ms = 9000;
2639 
2640 	/*
2641 	 * Make sure any pending FLR requests have cleared by waiting for the
2642 	 * FLR trigger bit to go to zero. Also clear GU_DEBUG's DRIVERFLR_STATUS
2643 	 * to make sure it's not still set from a prior attempt (it's a
2644 	 * write-to-clear bit).
2645 	 * Note that we should never be in a situation where a previous attempt
2646 	 * is still pending (unless the HW is totally dead), but better to be
2647 	 * safe in case something unexpected happens.
2648 	 */
2649 	ret = intel_wait_for_register_fw(uncore, GU_CNTL, DRIVERFLR, 0, flr_timeout_ms, NULL);
2650 	if (ret) {
2651 		drm_err(&i915->drm,
2652 			"Failed to wait for Driver-FLR bit to clear! %d\n",
2653 			ret);
2654 		return;
2655 	}
2656 	intel_uncore_write_fw(uncore, GU_DEBUG, DRIVERFLR_STATUS);
2657 
2658 	/* Trigger the actual Driver-FLR */
2659 	intel_uncore_rmw_fw(uncore, GU_CNTL, 0, DRIVERFLR);
2660 
2661 	/* Wait for hardware teardown to complete */
2662 	ret = intel_wait_for_register_fw(uncore, GU_CNTL,
2663 					 DRIVERFLR, 0,
2664 					 flr_timeout_ms, NULL);
2665 	if (ret) {
2666 		drm_err(&i915->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
2667 		return;
2668 	}
2669 
2670 	/* Wait for hardware/firmware re-init to complete */
2671 	ret = intel_wait_for_register_fw(uncore, GU_DEBUG,
2672 					 DRIVERFLR_STATUS, DRIVERFLR_STATUS,
2673 					 flr_timeout_ms, NULL);
2674 	if (ret) {
2675 		drm_err(&i915->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
2676 		return;
2677 	}
2678 
2679 	/* Clear sticky completion status */
2680 	intel_uncore_write_fw(uncore, GU_DEBUG, DRIVERFLR_STATUS);
2681 }
2682 
2683 /* Called via drm-managed action */
2684 void intel_uncore_fini_mmio(struct drm_device *dev, void *data)
2685 {
2686 	struct intel_uncore *uncore = data;
2687 
2688 	if (intel_uncore_has_forcewake(uncore)) {
2689 		iosf_mbi_punit_acquire();
2690 		iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
2691 			&uncore->pmic_bus_access_nb);
2692 		intel_uncore_forcewake_reset(uncore);
2693 		intel_uncore_fw_domains_fini(uncore);
2694 		iosf_mbi_punit_release();
2695 	}
2696 
2697 	if (intel_uncore_needs_flr_on_fini(uncore))
2698 		driver_initiated_flr(uncore);
2699 }
2700 
2701 /**
2702  * __intel_wait_for_register_fw - wait until register matches expected state
2703  * @uncore: the struct intel_uncore
2704  * @reg: the register to read
2705  * @mask: mask to apply to register value
2706  * @value: expected value
2707  * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
2708  * @slow_timeout_ms: slow timeout in milliseconds
2709  * @out_value: optional placeholder to hold register value
2710  *
2711  * This routine waits until the target register @reg contains the expected
2712  * @value after applying the @mask, i.e. it waits until ::
2713  *
2714  *     (intel_uncore_read_fw(uncore, reg) & mask) == value
2715  *
2716  * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
2717  * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
2718  * must not be larger than 20,000 microseconds.
2719  *
2720  * Note that this routine assumes the caller holds forcewake asserted; it is
2721  * not suitable for very long waits. See intel_wait_for_register() if you
2722  * wish to wait without holding forcewake for the duration (i.e. you expect
2723  * the wait to be slow).
2724  *
2725  * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
2726  */
2727 int __intel_wait_for_register_fw(struct intel_uncore *uncore,
2728 				 i915_reg_t reg,
2729 				 u32 mask,
2730 				 u32 value,
2731 				 unsigned int fast_timeout_us,
2732 				 unsigned int slow_timeout_ms,
2733 				 u32 *out_value)
2734 {
2735 	u32 reg_value = 0;
2736 #define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
2737 	int ret;
2738 
2739 	/* Catch any overuse of this function */
2740 	might_sleep_if(slow_timeout_ms);
2741 	GEM_BUG_ON(fast_timeout_us > 20000);
2742 	GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);
2743 
2744 	ret = -ETIMEDOUT;
2745 	if (fast_timeout_us && fast_timeout_us <= 20000)
2746 		ret = _wait_for_atomic(done, fast_timeout_us, 0);
2747 	if (ret && slow_timeout_ms)
2748 		ret = wait_for(done, slow_timeout_ms);
2749 
2750 	if (out_value)
2751 		*out_value = reg_value;
2752 
2753 	return ret;
2754 #undef done
2755 }
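
/*
 * Illustrative sketch, not part of the driver: a caller that already holds
 * the needed forcewake (and a runtime PM wakeref) could use this helper to
 * poll a masked register such as FORCEWAKE_MT from atomic context, e.g.
 * waiting up to 500us for the write-only upper mask bits to read back as 0
 * (mirroring the spirit of the check in sanity_check_mmio_access() above):
 *
 *	u32 val;
 *	int err;
 *
 *	err = __intel_wait_for_register_fw(uncore, FORCEWAKE_MT,
 *					   0xffff0000, 0,
 *					   500, 0, &val);
 *	if (err)
 *		drm_err(&uncore->i915->drm,
 *			"FORCEWAKE_MT did not settle, last value 0x%x\n", val);
 */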
2756 
2757 /**
2758  * __intel_wait_for_register - wait until register matches expected state
2759  * @uncore: the struct intel_uncore
2760  * @reg: the register to read
2761  * @mask: mask to apply to register value
2762  * @value: expected value
2763  * @fast_timeout_us: fast timeout in microseconds for atomic/tight wait
2764  * @slow_timeout_ms: slow timeout in milliseconds
2765  * @out_value: optional placeholder to hold register value
2766  *
2767  * This routine waits until the target register @reg contains the expected
2768  * @value after applying the @mask, i.e. it waits until ::
2769  *
2770  *     (intel_uncore_read(uncore, reg) & mask) == value
2771  *
2772  * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
2773  *
2774  * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
2775  */
2776 int __intel_wait_for_register(struct intel_uncore *uncore,
2777 			      i915_reg_t reg,
2778 			      u32 mask,
2779 			      u32 value,
2780 			      unsigned int fast_timeout_us,
2781 			      unsigned int slow_timeout_ms,
2782 			      u32 *out_value)
2783 {
2784 	unsigned fw =
2785 		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
2786 	u32 reg_value;
2787 	int ret;
2788 
2789 	might_sleep_if(slow_timeout_ms);
2790 
2791 	spin_lock_irq(&uncore->lock);
2792 	intel_uncore_forcewake_get__locked(uncore, fw);
2793 
2794 	ret = __intel_wait_for_register_fw(uncore,
2795 					   reg, mask, value,
2796 					   fast_timeout_us, 0, &reg_value);
2797 
2798 	intel_uncore_forcewake_put__locked(uncore, fw);
2799 	spin_unlock_irq(&uncore->lock);
2800 
2801 	if (ret && slow_timeout_ms)
2802 		ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
2803 								       reg),
2804 				 (reg_value & mask) == value,
2805 				 slow_timeout_ms * 1000, 10, 1000);
2806 
2807 	/* just trace the final value */
2808 	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
2809 
2810 	if (out_value)
2811 		*out_value = reg_value;
2812 
2813 	return ret;
2814 }
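
/*
 * Illustrative sketch, not part of the driver: unlike the _fw variant above,
 * this helper takes the required forcewake domains itself, so a sleeping
 * caller holding only a runtime PM wakeref could, for example, wait up to
 * 100ms for the driver-FLR trigger bit in GU_CNTL to clear:
 *
 *	int err = __intel_wait_for_register(uncore, GU_CNTL, DRIVERFLR, 0,
 *					    2, 100, NULL);
 */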
2815 
2816 bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
2817 {
2818 	bool ret;
2819 
2820 	if (!uncore->debug)
2821 		return false;
2822 
2823 	spin_lock_irq(&uncore->debug->lock);
2824 	ret = check_for_unclaimed_mmio(uncore);
2825 	spin_unlock_irq(&uncore->debug->lock);
2826 
2827 	return ret;
2828 }
2829 
2830 bool
2831 intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
2832 {
2833 	bool ret = false;
2834 
2835 	if (drm_WARN_ON(&uncore->i915->drm, !uncore->debug))
2836 		return false;
2837 
2838 	spin_lock_irq(&uncore->debug->lock);
2839 
2840 	if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
2841 		goto out;
2842 
2843 	if (unlikely(check_for_unclaimed_mmio(uncore))) {
2844 		if (!uncore->i915->params.mmio_debug) {
2845 			drm_dbg(&uncore->i915->drm,
2846 				"Unclaimed register detected, "
2847 				"enabling oneshot unclaimed register reporting. "
2848 				"Please use i915.mmio_debug=N for more information.\n");
2849 			uncore->i915->params.mmio_debug++;
2850 		}
2851 		uncore->debug->unclaimed_mmio_check--;
2852 		ret = true;
2853 	}
2854 
2855 out:
2856 	spin_unlock_irq(&uncore->debug->lock);
2857 
2858 	return ret;
2859 }
2860 
2861 /**
2862  * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
2863  * 				    a register
2864  * @uncore: pointer to struct intel_uncore
2865  * @reg: register in question
2866  * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
2867  *
2868  * Returns the set of forcewake domains that must be taken with, for example,
2869  * intel_uncore_forcewake_get() for the specified register to be accessible in the
2870  * specified mode (read, write or read/write) with raw mmio accessors.
2871  *
2872  * NOTE: On Gen6 and Gen7 the write forcewake domain (FORCEWAKE_RENDER) requires
2873  * callers to do FIFO management on their own or risk losing writes.
2874  */
2875 enum forcewake_domains
2876 intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
2877 			       i915_reg_t reg, unsigned int op)
2878 {
2879 	enum forcewake_domains fw_domains = 0;
2880 
2881 	drm_WARN_ON(&uncore->i915->drm, !op);
2882 
2883 	if (!intel_uncore_has_forcewake(uncore))
2884 		return 0;
2885 
2886 	if (op & FW_REG_READ)
2887 		fw_domains = uncore->funcs.read_fw_domains(uncore, reg);
2888 
2889 	if (op & FW_REG_WRITE)
2890 		fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);
2891 
2892 	drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains);
2893 
2894 	return fw_domains;
2895 }
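
/*
 * Illustrative sketch, not part of the driver: a typical caller pairs this
 * query with an explicit forcewake grab so that the raw _fw accessors stay
 * valid for the whole read-modify-write sequence (reg and bit are
 * placeholders here):
 *
 *	enum forcewake_domains fw =
 *		intel_uncore_forcewake_for_reg(uncore, reg,
 *					       FW_REG_READ | FW_REG_WRITE);
 *
 *	intel_uncore_forcewake_get(uncore, fw);
 *	intel_uncore_write_fw(uncore, reg,
 *			      intel_uncore_read_fw(uncore, reg) | bit);
 *	intel_uncore_forcewake_put(uncore, fw);
 */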
2896 
2897 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2898 #include "selftests/mock_uncore.c"
2899 #include "selftests/intel_uncore.c"
2900 #endif
2901