1 /*
2  * Copyright © 2013 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include <linux/pm_runtime.h>
25 
26 #include <drm/drm_managed.h>
27 #include <drm/drm_print.h>
28 
29 #include "display/intel_display_core.h"
30 #include "gt/intel_engine_regs.h"
31 #include "gt/intel_gt.h"
32 #include "gt/intel_gt_regs.h"
33 
34 #include "i915_drv.h"
35 #include "i915_iosf_mbi.h"
36 #include "i915_reg.h"
37 #include "i915_vgpu.h"
38 #include "i915_wait_util.h"
39 #include "i915_mmio_range.h"
40 #include "intel_uncore_trace.h"
41 
42 #define FORCEWAKE_ACK_TIMEOUT_MS 50
43 #define GT_FIFO_TIMEOUT_MS	 10
44 
45 struct intel_uncore *to_intel_uncore(struct drm_device *drm)
46 {
47 	return &to_i915(drm)->uncore;
48 }
49 
50 #define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
51 
52 static void
53 fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
54 {
55 	uncore->fw_get_funcs->force_wake_get(uncore, fw_domains);
56 }
57 
58 void
59 intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915)
60 {
61 	spin_lock_init(&i915->mmio_debug.lock);
62 	i915->mmio_debug.unclaimed_mmio_check = 1;
63 
64 	i915->uncore.debug = &i915->mmio_debug;
65 }
66 
67 static void mmio_debug_suspend(struct intel_uncore *uncore)
68 {
69 	if (!uncore->debug)
70 		return;
71 
72 	spin_lock(&uncore->debug->lock);
73 
74 	/* Save and disable mmio debugging for the user bypass */
75 	if (!uncore->debug->suspend_count++) {
76 		uncore->debug->saved_mmio_check = uncore->debug->unclaimed_mmio_check;
77 		uncore->debug->unclaimed_mmio_check = 0;
78 	}
79 
80 	spin_unlock(&uncore->debug->lock);
81 }
82 
83 static bool check_for_unclaimed_mmio(struct intel_uncore *uncore);
84 
85 static void mmio_debug_resume(struct intel_uncore *uncore)
86 {
87 	if (!uncore->debug)
88 		return;
89 
90 	spin_lock(&uncore->debug->lock);
91 
92 	if (!--uncore->debug->suspend_count)
93 		uncore->debug->unclaimed_mmio_check = uncore->debug->saved_mmio_check;
94 
95 	if (check_for_unclaimed_mmio(uncore))
96 		drm_info(&uncore->i915->drm,
97 			 "Invalid mmio detected during user access\n");
98 
99 	spin_unlock(&uncore->debug->lock);
100 }
101 
102 static const char * const forcewake_domain_names[] = {
103 	"render",
104 	"gt",
105 	"media",
106 	"vdbox0",
107 	"vdbox1",
108 	"vdbox2",
109 	"vdbox3",
110 	"vdbox4",
111 	"vdbox5",
112 	"vdbox6",
113 	"vdbox7",
114 	"vebox0",
115 	"vebox1",
116 	"vebox2",
117 	"vebox3",
118 	"gsc",
119 };
120 
121 const char *
122 intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
123 {
124 	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
125 
126 	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
127 		return forcewake_domain_names[id];
128 
129 	WARN_ON(id);
130 
131 	return "unknown";
132 }
133 
134 #define fw_ack(d) readl((d)->reg_ack)
135 #define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
136 #define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)
137 
138 static inline void
139 fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
140 {
141 	/*
142 	 * We don't really know if the powerwell for the forcewake domain we are
143 	 * trying to reset here exists at this point (engines could be fused
144 	 * off in ICL+), so don't wait for the acks.
145 	 */
146 	/* WaRsClearFWBitsAtReset */
147 	if (GRAPHICS_VER(d->uncore->i915) >= 12)
148 		fw_clear(d, 0xefff);
149 	else
150 		fw_clear(d, 0xffff);
151 }
152 
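/*
 * Arm the 1 ms auto-release timer for a forcewake domain. The extra
 * wake_count reference taken here is dropped again from
 * intel_uncore_fw_release_timer() once the domain has stopped being marked
 * active, so short bursts of register accesses can share a single
 * forcewake cycle instead of toggling the power well for every access.
 */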
153 static inline void
154 fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
155 {
156 	GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
157 	d->uncore->fw_domains_timer |= d->mask;
158 	d->wake_count++;
159 	hrtimer_start_range_ns(&d->timer,
160 			       NSEC_PER_MSEC,
161 			       NSEC_PER_MSEC,
162 			       HRTIMER_MODE_REL);
163 }
164 
165 static inline int
166 __wait_for_ack(const struct intel_uncore_forcewake_domain *d,
167 	       const u32 ack,
168 	       const u32 value)
169 {
170 	return wait_for_atomic((fw_ack(d) & ack) == value,
171 			       FORCEWAKE_ACK_TIMEOUT_MS);
172 }
173 
174 static inline int
175 wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
176 	       const u32 ack)
177 {
178 	return __wait_for_ack(d, ack, 0);
179 }
180 
181 static inline int
182 wait_ack_set(const struct intel_uncore_forcewake_domain *d,
183 	     const u32 ack)
184 {
185 	return __wait_for_ack(d, ack, ack);
186 }
187 
188 static inline void
189 fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
190 {
191 	if (!wait_ack_clear(d, FORCEWAKE_KERNEL))
192 		return;
193 
194 	if (fw_ack(d) == ~0) {
195 		drm_err(&d->uncore->i915->drm,
196 			"%s: MMIO unreliable (forcewake register returns 0xFFFFFFFF)!\n",
197 			intel_uncore_forcewake_domain_to_str(d->id));
198 		intel_gt_set_wedged_async(d->uncore->gt);
199 	} else {
200 		drm_err(&d->uncore->i915->drm,
201 			"%s: timed out waiting for forcewake ack to clear.\n",
202 			intel_uncore_forcewake_domain_to_str(d->id));
203 	}
204 
205 	add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
206 }
207 
208 enum ack_type {
209 	ACK_CLEAR = 0,
210 	ACK_SET
211 };
212 
213 static int
214 fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
215 				 const enum ack_type type)
216 {
217 	const u32 ack_bit = FORCEWAKE_KERNEL;
218 	const u32 value = type == ACK_SET ? ack_bit : 0;
219 	unsigned int pass;
220 	bool ack_detected;
221 
222 	/*
223 	 * There is a possibility of the driver's wake request colliding
224 	 * with the hardware's own wake requests, which can cause the
225 	 * hardware to not deliver the driver's ack message.
226 	 *
227 	 * Use a fallback bit toggle to kick the gpu state machine
228 	 * in the hope that the original ack will be delivered along with
229 	 * the fallback ack.
230 	 *
231 	 * This workaround is described in HSDES #1604254524 and it's known as:
232 	 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
233 	 * although the name is a bit misleading.
234 	 */
235 
236 	pass = 1;
237 	do {
238 		wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);
239 
240 		fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
241 		/* Give gt some time to relax before the polling frenzy */
242 		udelay(10 * pass);
243 		wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);
244 
245 		ack_detected = (fw_ack(d) & ack_bit) == value;
246 
247 		fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
248 	} while (!ack_detected && pass++ < 10);
249 
250 	drm_dbg(&d->uncore->i915->drm,
251 		"%s had to use fallback to %s ack, 0x%x (passes %u)\n",
252 		intel_uncore_forcewake_domain_to_str(d->id),
253 		type == ACK_SET ? "set" : "clear",
254 		fw_ack(d),
255 		pass);
256 
257 	return ack_detected ? 0 : -ETIMEDOUT;
258 }
259 
260 static inline void
261 fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
262 {
263 	if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
264 		return;
265 
266 	if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
267 		fw_domain_wait_ack_clear(d);
268 }
269 
270 static inline void
271 fw_domain_get(const struct intel_uncore_forcewake_domain *d)
272 {
273 	fw_set(d, FORCEWAKE_KERNEL);
274 }
275 
276 static inline void
277 fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
278 {
279 	if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
280 		drm_err(&d->uncore->i915->drm,
281 			"%s: timed out waiting for forcewake ack request.\n",
282 			intel_uncore_forcewake_domain_to_str(d->id));
283 		add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
284 	}
285 }
286 
287 static inline void
288 fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
289 {
290 	if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
291 		return;
292 
293 	if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
294 		fw_domain_wait_ack_set(d);
295 }
296 
297 static inline void
298 fw_domain_put(const struct intel_uncore_forcewake_domain *d)
299 {
300 	fw_clear(d, FORCEWAKE_KERNEL);
301 }
302 
303 static void
304 fw_domains_get_normal(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
305 {
306 	struct intel_uncore_forcewake_domain *d;
307 	unsigned int tmp;
308 
309 	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
310 
311 	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
312 		fw_domain_wait_ack_clear(d);
313 		fw_domain_get(d);
314 	}
315 
316 	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
317 		fw_domain_wait_ack_set(d);
318 
319 	uncore->fw_domains_active |= fw_domains;
320 }
321 
322 static void
323 fw_domains_get_with_fallback(struct intel_uncore *uncore,
324 			     enum forcewake_domains fw_domains)
325 {
326 	struct intel_uncore_forcewake_domain *d;
327 	unsigned int tmp;
328 
329 	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
330 
331 	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
332 		fw_domain_wait_ack_clear_fallback(d);
333 		fw_domain_get(d);
334 	}
335 
336 	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
337 		fw_domain_wait_ack_set_fallback(d);
338 
339 	uncore->fw_domains_active |= fw_domains;
340 }
341 
342 static void
343 fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
344 {
345 	struct intel_uncore_forcewake_domain *d;
346 	unsigned int tmp;
347 
348 	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
349 
350 	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
351 		fw_domain_put(d);
352 
353 	uncore->fw_domains_active &= ~fw_domains;
354 }
355 
356 static void
357 fw_domains_reset(struct intel_uncore *uncore,
358 		 enum forcewake_domains fw_domains)
359 {
360 	struct intel_uncore_forcewake_domain *d;
361 	unsigned int tmp;
362 
363 	if (!fw_domains)
364 		return;
365 
366 	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
367 
368 	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
369 		fw_domain_reset(d);
370 }
371 
372 static inline u32 gt_thread_status(struct intel_uncore *uncore)
373 {
374 	u32 val;
375 
376 	val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
377 	val &= GEN6_GT_THREAD_STATUS_CORE_MASK;
378 
379 	return val;
380 }
381 
382 static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
383 {
384 	/*
385 	 * w/a for a sporadic read returning 0 by waiting for the GT
386 	 * thread to wake up.
387 	 */
388 	drm_WARN_ONCE(&uncore->i915->drm,
389 		      wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
390 		      "GT thread status wait timed out\n");
391 }
392 
393 static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
394 					      enum forcewake_domains fw_domains)
395 {
396 	fw_domains_get_normal(uncore, fw_domains);
397 
398 	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
399 	__gen6_gt_wait_for_thread_c0(uncore);
400 }
401 
402 static inline u32 fifo_free_entries(struct intel_uncore *uncore)
403 {
404 	u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);
405 
406 	return count & GT_FIFO_FREE_ENTRIES_MASK;
407 }
408 
409 static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
410 {
411 	u32 n;
412 
413 	/* On VLV, the FIFO is shared by both SW and HW,
414 	 * so we need to read FREE_ENTRIES every time. */
415 	if (IS_VALLEYVIEW(uncore->i915))
416 		n = fifo_free_entries(uncore);
417 	else
418 		n = uncore->fifo_count;
419 
420 	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
421 		if (wait_for_atomic((n = fifo_free_entries(uncore)) >
422 				    GT_FIFO_NUM_RESERVED_ENTRIES,
423 				    GT_FIFO_TIMEOUT_MS)) {
424 			drm_dbg(&uncore->i915->drm,
425 				"GT_FIFO timeout, entries: %u\n", n);
426 			return;
427 		}
428 	}
429 
430 	uncore->fifo_count = n - 1;
431 }
432 
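/*
 * hrtimer callback for the per-domain auto-release armed by
 * fw_domain_arm_timer(): if the domain has been marked active again since
 * the timer was armed, keep it alive for another period; otherwise drop
 * the timer's wake_count reference and, if that was the last reference,
 * power the domain back down.
 */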
433 static enum hrtimer_restart
434 intel_uncore_fw_release_timer(struct hrtimer *timer)
435 {
436 	struct intel_uncore_forcewake_domain *domain =
437 	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
438 	struct intel_uncore *uncore = domain->uncore;
439 	unsigned long irqflags;
440 
441 	assert_rpm_device_not_suspended(uncore->rpm);
442 
443 	if (xchg(&domain->active, false))
444 		return HRTIMER_RESTART;
445 
446 	spin_lock_irqsave(&uncore->lock, irqflags);
447 
448 	uncore->fw_domains_timer &= ~domain->mask;
449 
450 	GEM_BUG_ON(!domain->wake_count);
451 	if (--domain->wake_count == 0)
452 		fw_domains_put(uncore, domain->mask);
453 
454 	spin_unlock_irqrestore(&uncore->lock, irqflags);
455 
456 	return HRTIMER_NORESTART;
457 }
458 
459 /* Note: callers must have acquired the PUNIT->PMIC bus before calling this. */
460 static unsigned int
461 intel_uncore_forcewake_reset(struct intel_uncore *uncore)
462 {
463 	unsigned long irqflags;
464 	struct intel_uncore_forcewake_domain *domain;
465 	int retry_count = 100;
466 	enum forcewake_domains fw, active_domains;
467 
468 	iosf_mbi_assert_punit_acquired();
469 
470 	/* Hold uncore.lock across reset to prevent any register access
471 	 * with forcewake not set correctly. Wait until all pending
472 	 * timers are run before holding.
473 	 */
474 	while (1) {
475 		unsigned int tmp;
476 
477 		active_domains = 0;
478 
479 		for_each_fw_domain(domain, uncore, tmp) {
480 			smp_store_mb(domain->active, false);
481 			if (hrtimer_cancel(&domain->timer) == 0)
482 				continue;
483 
484 			intel_uncore_fw_release_timer(&domain->timer);
485 		}
486 
487 		spin_lock_irqsave(&uncore->lock, irqflags);
488 
489 		for_each_fw_domain(domain, uncore, tmp) {
490 			if (hrtimer_active(&domain->timer))
491 				active_domains |= domain->mask;
492 		}
493 
494 		if (active_domains == 0)
495 			break;
496 
497 		if (--retry_count == 0) {
498 			drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
499 			break;
500 		}
501 
502 		spin_unlock_irqrestore(&uncore->lock, irqflags);
503 		cond_resched();
504 	}
505 
506 	drm_WARN_ON(&uncore->i915->drm, active_domains);
507 
508 	fw = uncore->fw_domains_active;
509 	if (fw)
510 		fw_domains_put(uncore, fw);
511 
512 	fw_domains_reset(uncore, uncore->fw_domains);
513 	assert_forcewakes_inactive(uncore);
514 
515 	spin_unlock_irqrestore(&uncore->lock, irqflags);
516 
517 	return fw; /* track the lost user forcewake domains */
518 }
519 
520 static bool
521 fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
522 {
523 	u32 dbg;
524 
525 	dbg = __raw_uncore_read32(uncore, FPGA_DBG);
526 	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
527 		return false;
528 
529 	/*
530 	 * Bugs in PCI programming (or failing hardware) can occasionally cause
531 	 * us to lose access to the MMIO BAR.  When this happens, register
532 	 * reads will come back with 0xFFFFFFFF for every register and things
533 	 * go bad very quickly.  Let's try to detect that special case and at
534 	 * least try to print a more informative message about what has
535 	 * happened.
536 	 *
537 	 * During normal operation the FPGA_DBG register has several unused
538 	 * bits that will always read back as 0's so we can use them as canaries
539 	 * to recognize when MMIO accesses are just busted.
540 	 */
541 	if (unlikely(dbg == ~0))
542 		drm_err(&uncore->i915->drm,
543 			"Lost access to MMIO BAR; all registers now read back as 0xFFFFFFFF!\n");
544 
545 	__raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
546 
547 	return true;
548 }
549 
550 static bool
551 vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
552 {
553 	u32 cer;
554 
555 	cer = __raw_uncore_read32(uncore, CLAIM_ER);
556 	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
557 		return false;
558 
559 	__raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);
560 
561 	return true;
562 }
563 
564 static bool
565 gen6_check_for_fifo_debug(struct intel_uncore *uncore)
566 {
567 	u32 fifodbg;
568 
569 	fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);
570 
571 	if (unlikely(fifodbg)) {
572 		drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x%08x\n", fifodbg);
573 		__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
574 	}
575 
576 	return fifodbg;
577 }
578 
579 static bool
580 check_for_unclaimed_mmio(struct intel_uncore *uncore)
581 {
582 	bool ret = false;
583 
584 	lockdep_assert_held(&uncore->debug->lock);
585 
586 	if (uncore->debug->suspend_count)
587 		return false;
588 
589 	if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
590 		ret |= fpga_check_for_unclaimed_mmio(uncore);
591 
592 	if (intel_uncore_has_dbg_unclaimed(uncore))
593 		ret |= vlv_check_for_unclaimed_mmio(uncore);
594 
595 	if (intel_uncore_has_fifo(uncore))
596 		ret |= gen6_check_for_fifo_debug(uncore);
597 
598 	return ret;
599 }
600 
601 static void forcewake_early_sanitize(struct intel_uncore *uncore,
602 				     unsigned int restore_forcewake)
603 {
604 	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
605 
606 	/* WaDisableShadowRegForCpd:chv */
607 	if (IS_CHERRYVIEW(uncore->i915)) {
608 		__raw_uncore_write32(uncore, GTFIFOCTL,
609 				     __raw_uncore_read32(uncore, GTFIFOCTL) |
610 				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
611 				     GT_FIFO_CTL_RC6_POLICY_STALL);
612 	}
613 
614 	iosf_mbi_punit_acquire();
615 	intel_uncore_forcewake_reset(uncore);
616 	if (restore_forcewake) {
617 		spin_lock_irq(&uncore->lock);
618 		fw_domains_get(uncore, restore_forcewake);
619 
620 		if (intel_uncore_has_fifo(uncore))
621 			uncore->fifo_count = fifo_free_entries(uncore);
622 		spin_unlock_irq(&uncore->lock);
623 	}
624 	iosf_mbi_punit_release();
625 }
626 
627 void intel_uncore_suspend(struct intel_uncore *uncore)
628 {
629 	if (!intel_uncore_has_forcewake(uncore))
630 		return;
631 
632 	iosf_mbi_punit_acquire();
633 	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
634 		&uncore->pmic_bus_access_nb);
635 	uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
636 	iosf_mbi_punit_release();
637 }
638 
639 void intel_uncore_resume_early(struct intel_uncore *uncore)
640 {
641 	unsigned int restore_forcewake;
642 
643 	if (intel_uncore_unclaimed_mmio(uncore))
644 		drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");
645 
646 	if (!intel_uncore_has_forcewake(uncore))
647 		return;
648 
649 	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
650 	forcewake_early_sanitize(uncore, restore_forcewake);
651 
652 	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
653 }
654 
655 void intel_uncore_runtime_resume(struct intel_uncore *uncore)
656 {
657 	if (!intel_uncore_has_forcewake(uncore))
658 		return;
659 
660 	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
661 }
662 
663 static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
664 					 enum forcewake_domains fw_domains)
665 {
666 	struct intel_uncore_forcewake_domain *domain;
667 	unsigned int tmp;
668 
669 	fw_domains &= uncore->fw_domains;
670 
671 	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
672 		if (domain->wake_count++) {
673 			fw_domains &= ~domain->mask;
674 			domain->active = true;
675 		}
676 	}
677 
678 	if (fw_domains)
679 		fw_domains_get(uncore, fw_domains);
680 }
681 
682 /**
683  * intel_uncore_forcewake_get - grab forcewake domain references
684  * @uncore: the intel_uncore structure
685  * @fw_domains: forcewake domains to get reference on
686  *
687  * This function can be used to get GT's forcewake domain references.
688  * Normal register access will handle the forcewake domains automatically.
689  * However, if some sequence requires the GT to not power down particular
690  * forcewake domains, this function should be called at the beginning of the
691  * sequence, and the reference should subsequently be dropped by a symmetric
692  * call to intel_uncore_forcewake_put(). Usually the caller wants all the
693  * domains to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
694  */
695 void intel_uncore_forcewake_get(struct intel_uncore *uncore,
696 				enum forcewake_domains fw_domains)
697 {
698 	unsigned long irqflags;
699 
700 	if (!uncore->fw_get_funcs)
701 		return;
702 
703 	assert_rpm_wakelock_held(uncore->rpm);
704 
705 	spin_lock_irqsave(&uncore->lock, irqflags);
706 	__intel_uncore_forcewake_get(uncore, fw_domains);
707 	spin_unlock_irqrestore(&uncore->lock, irqflags);
708 }
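
/*
 * Illustrative pairing (a sketch, not lifted from any particular caller):
 * a sequence that must keep the render power well up across several raw
 * accesses would typically look like
 *
 *	intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER);
 *	... intel_uncore_read_fw()/intel_uncore_write_fw() accesses ...
 *	intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER);
 *
 * where the _fw accessors skip the per-access forcewake handling that the
 * plain accessors would otherwise perform.
 */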
709 
710 /**
711  * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
712  * @uncore: the intel_uncore structure
713  *
714  * This function is a wrapper around intel_uncore_forcewake_get() to acquire
715  * the GT powerwell and in the process disable our debugging for the
716  * duration of userspace's bypass.
717  */
718 void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
719 {
720 	spin_lock_irq(&uncore->lock);
721 	if (!uncore->user_forcewake_count++) {
722 		intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
723 		mmio_debug_suspend(uncore);
724 	}
725 	spin_unlock_irq(&uncore->lock);
726 }
727 
728 /**
729  * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
730  * @uncore: the intel_uncore structure
731  *
732  * This function complements intel_uncore_forcewake_user_get() and releases
733  * the GT powerwell taken on behalf of the userspace bypass.
734  */
735 void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
736 {
737 	spin_lock_irq(&uncore->lock);
738 	if (!--uncore->user_forcewake_count) {
739 		mmio_debug_resume(uncore);
740 		intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
741 	}
742 	spin_unlock_irq(&uncore->lock);
743 }
744 
745 /**
746  * intel_uncore_forcewake_get__locked - grab forcewake domain references
747  * @uncore: the intel_uncore structure
748  * @fw_domains: forcewake domains to get reference on
749  *
750  * See intel_uncore_forcewake_get(). This variant places the onus
751  * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
752  */
753 void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
754 					enum forcewake_domains fw_domains)
755 {
756 	lockdep_assert_held(&uncore->lock);
757 
758 	if (!uncore->fw_get_funcs)
759 		return;
760 
761 	__intel_uncore_forcewake_get(uncore, fw_domains);
762 }
763 
764 static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
765 					 enum forcewake_domains fw_domains,
766 					 bool delayed)
767 {
768 	struct intel_uncore_forcewake_domain *domain;
769 	unsigned int tmp;
770 
771 	fw_domains &= uncore->fw_domains;
772 
773 	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
774 		GEM_BUG_ON(!domain->wake_count);
775 
776 		if (--domain->wake_count) {
777 			domain->active = true;
778 			continue;
779 		}
780 
781 		if (delayed &&
782 		    !(domain->uncore->fw_domains_timer & domain->mask))
783 			fw_domain_arm_timer(domain);
784 		else
785 			fw_domains_put(uncore, domain->mask);
786 	}
787 }
788 
789 /**
790  * intel_uncore_forcewake_put - release a forcewake domain reference
791  * @uncore: the intel_uncore structure
792  * @fw_domains: forcewake domains to put references
793  *
794  * This function drops the device-level forcewakes for specified
795  * domains obtained by intel_uncore_forcewake_get().
796  */
797 void intel_uncore_forcewake_put(struct intel_uncore *uncore,
798 				enum forcewake_domains fw_domains)
799 {
800 	unsigned long irqflags;
801 
802 	if (!uncore->fw_get_funcs)
803 		return;
804 
805 	spin_lock_irqsave(&uncore->lock, irqflags);
806 	__intel_uncore_forcewake_put(uncore, fw_domains, false);
807 	spin_unlock_irqrestore(&uncore->lock, irqflags);
808 }
809 
810 void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
811 					enum forcewake_domains fw_domains)
812 {
813 	unsigned long irqflags;
814 
815 	if (!uncore->fw_get_funcs)
816 		return;
817 
818 	spin_lock_irqsave(&uncore->lock, irqflags);
819 	__intel_uncore_forcewake_put(uncore, fw_domains, true);
820 	spin_unlock_irqrestore(&uncore->lock, irqflags);
821 }
822 
823 /**
824  * intel_uncore_forcewake_flush - flush the delayed release
825  * @uncore: the intel_uncore structure
826  * @fw_domains: forcewake domains to flush
827  */
828 void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
829 				  enum forcewake_domains fw_domains)
830 {
831 	struct intel_uncore_forcewake_domain *domain;
832 	unsigned int tmp;
833 
834 	if (!uncore->fw_get_funcs)
835 		return;
836 
837 	fw_domains &= uncore->fw_domains;
838 	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
839 		WRITE_ONCE(domain->active, false);
840 		if (hrtimer_cancel(&domain->timer))
841 			intel_uncore_fw_release_timer(&domain->timer);
842 	}
843 }
844 
845 /**
846  * intel_uncore_forcewake_put__locked - release forcewake domain references
847  * @uncore: the intel_uncore structure
848  * @fw_domains: forcewake domains to put references
849  *
850  * See intel_uncore_forcewake_put(). This variant places the onus
851  * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
852  */
853 void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
854 					enum forcewake_domains fw_domains)
855 {
856 	lockdep_assert_held(&uncore->lock);
857 
858 	if (!uncore->fw_get_funcs)
859 		return;
860 
861 	__intel_uncore_forcewake_put(uncore, fw_domains, false);
862 }
863 
864 void assert_forcewakes_inactive(struct intel_uncore *uncore)
865 {
866 	if (!uncore->fw_get_funcs)
867 		return;
868 
869 	drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
870 		 "Expected all fw_domains to be inactive, but %08x are still on\n",
871 		 uncore->fw_domains_active);
872 }
873 
874 void assert_forcewakes_active(struct intel_uncore *uncore,
875 			      enum forcewake_domains fw_domains)
876 {
877 	struct intel_uncore_forcewake_domain *domain;
878 	unsigned int tmp;
879 
880 	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
881 		return;
882 
883 	if (!uncore->fw_get_funcs)
884 		return;
885 
886 	spin_lock_irq(&uncore->lock);
887 
888 	assert_rpm_wakelock_held(uncore->rpm);
889 
890 	fw_domains &= uncore->fw_domains;
891 	drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
892 		 "Expected %08x fw_domains to be active, but %08x are off\n",
893 		 fw_domains, fw_domains & ~uncore->fw_domains_active);
894 
895 	/*
896 	 * Check that the caller has an explicit wakeref and we don't mistake
897 	 * it for the auto wakeref.
898 	 */
899 	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
900 		unsigned int actual = READ_ONCE(domain->wake_count);
901 		unsigned int expect = 1;
902 
903 		if (uncore->fw_domains_timer & domain->mask)
904 			expect++; /* pending automatic release */
905 
906 		if (drm_WARN(&uncore->i915->drm, actual < expect,
907 			     "Expected domain %d to be held awake by caller, count=%d\n",
908 			     domain->id, actual))
909 			break;
910 	}
911 
912 	spin_unlock_irq(&uncore->lock);
913 }
914 
915 /*
916  * We give fast paths for the really cool registers.  The second range includes
917  * media domains (and the GSC starting from Xe_LPM+)
918  */
919 #define NEEDS_FORCE_WAKE(reg) ({ \
920 	u32 __reg = (reg); \
921 	__reg < 0x40000 || __reg >= 0x116000; \
922 })
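/*
 * I.e. only offsets below 0x40000 or at/above 0x116000 ever go through the
 * forcewake table lookup; everything in between never needs forcewake.
 */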
923 
924 static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
925 {
926 	if (offset < entry->start)
927 		return -1;
928 	else if (offset > entry->end)
929 		return 1;
930 	else
931 		return 0;
932 }
933 
934 /* Copied and "macroized" from lib/bsearch.c */
935 #define BSEARCH(key, base, num, cmp) ({                                 \
936 	unsigned int start__ = 0, end__ = (num);                        \
937 	typeof(base) result__ = NULL;                                   \
938 	while (start__ < end__) {                                       \
939 		unsigned int mid__ = start__ + (end__ - start__) / 2;   \
940 		int ret__ = (cmp)((key), (base) + mid__);               \
941 		if (ret__ < 0) {                                        \
942 			end__ = mid__;                                  \
943 		} else if (ret__ > 0) {                                 \
944 			start__ = mid__ + 1;                            \
945 		} else {                                                \
946 			result__ = (base) + mid__;                      \
947 			break;                                          \
948 		}                                                       \
949 	}                                                               \
950 	result__;                                                       \
951 })
952 
953 static enum forcewake_domains
954 find_fw_domain(struct intel_uncore *uncore, u32 offset)
955 {
956 	const struct intel_forcewake_range *entry;
957 
958 	if (IS_GSI_REG(offset))
959 		offset += uncore->gsi_offset;
960 
961 	entry = BSEARCH(offset,
962 			uncore->fw_domains_table,
963 			uncore->fw_domains_table_entries,
964 			fw_range_cmp);
965 
966 	if (!entry)
967 		return 0;
968 
969 	/*
970 	 * The list of FW domains depends on the SKU in gen11+ so we
971 	 * can't determine it statically. We use FORCEWAKE_ALL and
972 	 * translate it here to the list of available domains.
973 	 */
974 	if (entry->domains == FORCEWAKE_ALL)
975 		return uncore->fw_domains;
976 
977 	drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
978 		 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
979 		 entry->domains & ~uncore->fw_domains, offset);
980 
981 	return entry->domains;
982 }
983 
984 /*
985  * Shadowed register tables describe special register ranges that i915 is
986  * allowed to write to without acquiring forcewake.  If these registers' power
987  * wells are down, the hardware will save values written by i915 to a shadow
988  * copy and automatically transfer them into the real register the next time
989  * the power well is woken up.  Shadowing only applies to writes; forcewake
990  * must still be acquired when reading from registers in these ranges.
991  *
992  * The documentation for shadowed registers is somewhat spotty on older
993  * platforms.  However, a register missing from these lists is non-fatal; it just
994  * means we'll wake up the hardware for some register accesses where we didn't
995  * really need to.
996  *
997  * The ranges listed in these tables must be sorted by offset.
998  *
999  * When adding new tables here, please also add them to
1000  * intel_shadow_table_check() in selftests/intel_uncore.c so that they will be
1001  * scanned for obvious mistakes or typos by the selftests.
1002  */
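
/*
 * As a concrete illustration (a sketch based purely on the tables below):
 * 0x2030 is the render ring's RING_TAIL register and shows up in each of
 * the gen8 through MTL tables, so a plain
 *
 *	intel_uncore_write(uncore, RING_TAIL(RENDER_RING_BASE), tail);
 *
 * (tail being whatever new value is to be written) can skip the forcewake
 * dance even when the render well is down, while reading the same register
 * back still has to take FORCEWAKE_RENDER.
 */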
1003 
1004 static const struct i915_mmio_range gen8_shadowed_regs[] = {
1005 	{ .start =  0x2030, .end =  0x2030 },
1006 	{ .start =  0xA008, .end =  0xA00C },
1007 	{ .start = 0x12030, .end = 0x12030 },
1008 	{ .start = 0x1a030, .end = 0x1a030 },
1009 	{ .start = 0x22030, .end = 0x22030 },
1010 };
1011 
1012 static const struct i915_mmio_range gen11_shadowed_regs[] = {
1013 	{ .start =   0x2030, .end =   0x2030 },
1014 	{ .start =   0x2550, .end =   0x2550 },
1015 	{ .start =   0xA008, .end =   0xA00C },
1016 	{ .start =  0x22030, .end =  0x22030 },
1017 	{ .start =  0x22230, .end =  0x22230 },
1018 	{ .start =  0x22510, .end =  0x22550 },
1019 	{ .start = 0x1C0030, .end = 0x1C0030 },
1020 	{ .start = 0x1C0230, .end = 0x1C0230 },
1021 	{ .start = 0x1C0510, .end = 0x1C0550 },
1022 	{ .start = 0x1C4030, .end = 0x1C4030 },
1023 	{ .start = 0x1C4230, .end = 0x1C4230 },
1024 	{ .start = 0x1C4510, .end = 0x1C4550 },
1025 	{ .start = 0x1C8030, .end = 0x1C8030 },
1026 	{ .start = 0x1C8230, .end = 0x1C8230 },
1027 	{ .start = 0x1C8510, .end = 0x1C8550 },
1028 	{ .start = 0x1D0030, .end = 0x1D0030 },
1029 	{ .start = 0x1D0230, .end = 0x1D0230 },
1030 	{ .start = 0x1D0510, .end = 0x1D0550 },
1031 	{ .start = 0x1D4030, .end = 0x1D4030 },
1032 	{ .start = 0x1D4230, .end = 0x1D4230 },
1033 	{ .start = 0x1D4510, .end = 0x1D4550 },
1034 	{ .start = 0x1D8030, .end = 0x1D8030 },
1035 	{ .start = 0x1D8230, .end = 0x1D8230 },
1036 	{ .start = 0x1D8510, .end = 0x1D8550 },
1037 };
1038 
1039 static const struct i915_mmio_range gen12_shadowed_regs[] = {
1040 	{ .start =   0x2030, .end =   0x2030 },
1041 	{ .start =   0x2510, .end =   0x2550 },
1042 	{ .start =   0xA008, .end =   0xA00C },
1043 	{ .start =   0xA188, .end =   0xA188 },
1044 	{ .start =   0xA278, .end =   0xA278 },
1045 	{ .start =   0xA540, .end =   0xA56C },
1046 	{ .start =   0xC4C8, .end =   0xC4C8 },
1047 	{ .start =   0xC4D4, .end =   0xC4D4 },
1048 	{ .start =   0xC600, .end =   0xC600 },
1049 	{ .start =  0x22030, .end =  0x22030 },
1050 	{ .start =  0x22510, .end =  0x22550 },
1051 	{ .start = 0x1C0030, .end = 0x1C0030 },
1052 	{ .start = 0x1C0510, .end = 0x1C0550 },
1053 	{ .start = 0x1C4030, .end = 0x1C4030 },
1054 	{ .start = 0x1C4510, .end = 0x1C4550 },
1055 	{ .start = 0x1C8030, .end = 0x1C8030 },
1056 	{ .start = 0x1C8510, .end = 0x1C8550 },
1057 	{ .start = 0x1D0030, .end = 0x1D0030 },
1058 	{ .start = 0x1D0510, .end = 0x1D0550 },
1059 	{ .start = 0x1D4030, .end = 0x1D4030 },
1060 	{ .start = 0x1D4510, .end = 0x1D4550 },
1061 	{ .start = 0x1D8030, .end = 0x1D8030 },
1062 	{ .start = 0x1D8510, .end = 0x1D8550 },
1063 
1064 	/*
1065 	 * The rest of these ranges are specific to Xe_HP and beyond, but
1066 	 * are reserved/unused ranges on earlier gen12 platforms, so they can
1067 	 * be safely added to the gen12 table.
1068 	 */
1069 	{ .start = 0x1E0030, .end = 0x1E0030 },
1070 	{ .start = 0x1E0510, .end = 0x1E0550 },
1071 	{ .start = 0x1E4030, .end = 0x1E4030 },
1072 	{ .start = 0x1E4510, .end = 0x1E4550 },
1073 	{ .start = 0x1E8030, .end = 0x1E8030 },
1074 	{ .start = 0x1E8510, .end = 0x1E8550 },
1075 	{ .start = 0x1F0030, .end = 0x1F0030 },
1076 	{ .start = 0x1F0510, .end = 0x1F0550 },
1077 	{ .start = 0x1F4030, .end = 0x1F4030 },
1078 	{ .start = 0x1F4510, .end = 0x1F4550 },
1079 	{ .start = 0x1F8030, .end = 0x1F8030 },
1080 	{ .start = 0x1F8510, .end = 0x1F8550 },
1081 };
1082 
1083 static const struct i915_mmio_range dg2_shadowed_regs[] = {
1084 	{ .start =   0x2030, .end =   0x2030 },
1085 	{ .start =   0x2510, .end =   0x2550 },
1086 	{ .start =   0xA008, .end =   0xA00C },
1087 	{ .start =   0xA188, .end =   0xA188 },
1088 	{ .start =   0xA278, .end =   0xA278 },
1089 	{ .start =   0xA540, .end =   0xA56C },
1090 	{ .start =   0xC4C8, .end =   0xC4C8 },
1091 	{ .start =   0xC4E0, .end =   0xC4E0 },
1092 	{ .start =   0xC600, .end =   0xC600 },
1093 	{ .start =   0xC658, .end =   0xC658 },
1094 	{ .start =  0x22030, .end =  0x22030 },
1095 	{ .start =  0x22510, .end =  0x22550 },
1096 	{ .start = 0x1C0030, .end = 0x1C0030 },
1097 	{ .start = 0x1C0510, .end = 0x1C0550 },
1098 	{ .start = 0x1C4030, .end = 0x1C4030 },
1099 	{ .start = 0x1C4510, .end = 0x1C4550 },
1100 	{ .start = 0x1C8030, .end = 0x1C8030 },
1101 	{ .start = 0x1C8510, .end = 0x1C8550 },
1102 	{ .start = 0x1D0030, .end = 0x1D0030 },
1103 	{ .start = 0x1D0510, .end = 0x1D0550 },
1104 	{ .start = 0x1D4030, .end = 0x1D4030 },
1105 	{ .start = 0x1D4510, .end = 0x1D4550 },
1106 	{ .start = 0x1D8030, .end = 0x1D8030 },
1107 	{ .start = 0x1D8510, .end = 0x1D8550 },
1108 	{ .start = 0x1E0030, .end = 0x1E0030 },
1109 	{ .start = 0x1E0510, .end = 0x1E0550 },
1110 	{ .start = 0x1E4030, .end = 0x1E4030 },
1111 	{ .start = 0x1E4510, .end = 0x1E4550 },
1112 	{ .start = 0x1E8030, .end = 0x1E8030 },
1113 	{ .start = 0x1E8510, .end = 0x1E8550 },
1114 	{ .start = 0x1F0030, .end = 0x1F0030 },
1115 	{ .start = 0x1F0510, .end = 0x1F0550 },
1116 	{ .start = 0x1F4030, .end = 0x1F4030 },
1117 	{ .start = 0x1F4510, .end = 0x1F4550 },
1118 	{ .start = 0x1F8030, .end = 0x1F8030 },
1119 	{ .start = 0x1F8510, .end = 0x1F8550 },
1120 };
1121 
1122 static const struct i915_mmio_range mtl_shadowed_regs[] = {
1123 	{ .start =   0x2030, .end =   0x2030 },
1124 	{ .start =   0x2510, .end =   0x2550 },
1125 	{ .start =   0xA008, .end =   0xA00C },
1126 	{ .start =   0xA188, .end =   0xA188 },
1127 	{ .start =   0xA278, .end =   0xA278 },
1128 	{ .start =   0xA540, .end =   0xA56C },
1129 	{ .start =   0xC050, .end =   0xC050 },
1130 	{ .start =   0xC340, .end =   0xC340 },
1131 	{ .start =   0xC4C8, .end =   0xC4C8 },
1132 	{ .start =   0xC4E0, .end =   0xC4E0 },
1133 	{ .start =   0xC600, .end =   0xC600 },
1134 	{ .start =   0xC658, .end =   0xC658 },
1135 	{ .start =   0xCFD4, .end =   0xCFDC },
1136 	{ .start =  0x22030, .end =  0x22030 },
1137 	{ .start =  0x22510, .end =  0x22550 },
1138 };
1139 
1140 static const struct i915_mmio_range xelpmp_shadowed_regs[] = {
1141 	{ .start = 0x1C0030, .end = 0x1C0030 },
1142 	{ .start = 0x1C0510, .end = 0x1C0550 },
1143 	{ .start = 0x1C8030, .end = 0x1C8030 },
1144 	{ .start = 0x1C8510, .end = 0x1C8550 },
1145 	{ .start = 0x1D0030, .end = 0x1D0030 },
1146 	{ .start = 0x1D0510, .end = 0x1D0550 },
1147 	{ .start = 0x38A008, .end = 0x38A00C },
1148 	{ .start = 0x38A188, .end = 0x38A188 },
1149 	{ .start = 0x38A278, .end = 0x38A278 },
1150 	{ .start = 0x38A540, .end = 0x38A56C },
1151 	{ .start = 0x38A618, .end = 0x38A618 },
1152 	{ .start = 0x38C050, .end = 0x38C050 },
1153 	{ .start = 0x38C340, .end = 0x38C340 },
1154 	{ .start = 0x38C4C8, .end = 0x38C4C8 },
1155 	{ .start = 0x38C4E0, .end = 0x38C4E4 },
1156 	{ .start = 0x38C600, .end = 0x38C600 },
1157 	{ .start = 0x38C658, .end = 0x38C658 },
1158 	{ .start = 0x38CFD4, .end = 0x38CFDC },
1159 };
1160 
1161 static int mmio_range_cmp(u32 key, const struct i915_mmio_range *range)
1162 {
1163 	if (key < range->start)
1164 		return -1;
1165 	else if (key > range->end)
1166 		return 1;
1167 	else
1168 		return 0;
1169 }
1170 
1171 static bool is_shadowed(struct intel_uncore *uncore, u32 offset)
1172 {
1173 	if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table))
1174 		return false;
1175 
1176 	if (IS_GSI_REG(offset))
1177 		offset += uncore->gsi_offset;
1178 
1179 	return BSEARCH(offset,
1180 		       uncore->shadowed_reg_table,
1181 		       uncore->shadowed_reg_table_entries,
1182 		       mmio_range_cmp);
1183 }
1184 
1185 static enum forcewake_domains
1186 gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
1187 {
1188 	return FORCEWAKE_RENDER;
1189 }
1190 
1191 #define __fwtable_reg_read_fw_domains(uncore, offset) \
1192 ({ \
1193 	enum forcewake_domains __fwd = 0; \
1194 	if (NEEDS_FORCE_WAKE((offset))) \
1195 		__fwd = find_fw_domain(uncore, offset); \
1196 	__fwd; \
1197 })
1198 
1199 #define __fwtable_reg_write_fw_domains(uncore, offset) \
1200 ({ \
1201 	enum forcewake_domains __fwd = 0; \
1202 	const u32 __offset = (offset); \
1203 	if (NEEDS_FORCE_WAKE((__offset)) && !is_shadowed(uncore, __offset)) \
1204 		__fwd = find_fw_domain(uncore, __offset); \
1205 	__fwd; \
1206 })
1207 
1208 #define GEN_FW_RANGE(s, e, d) \
1209 	{ .start = (s), .end = (e), .domains = (d) }
1210 
1211 /*
1212  * All platforms' forcewake tables below must be sorted by offset ranges.
1213  * Furthermore, newly added forcewake tables should be "watertight" and have
1214  * no gaps between ranges.
1215  *
1216  * When there are multiple consecutive ranges listed in the bspec with
1217  * the same forcewake domain, it is customary to combine them into a single
1218  * row in the tables below to keep the tables small and lookups fast.
1219  * Likewise, reserved/unused ranges may be combined with the preceding and/or
1220  * following ranges since the driver will never be making MMIO accesses in
1221  * those ranges.
1222  *
1223  * For example, if the bspec were to list:
1224  *
1225  *    ...
1226  *    0x1000 - 0x1fff:  GT
1227  *    0x2000 - 0x2cff:  GT
1228  *    0x2d00 - 0x2fff:  unused/reserved
1229  *    0x3000 - 0xffff:  GT
1230  *    ...
1231  *
1232  * these could all be represented by a single line in the code:
1233  *
1234  *   GEN_FW_RANGE(0x1000, 0xffff, FORCEWAKE_GT)
1235  *
1236  * When adding new forcewake tables here, please also add them to
1237  * intel_uncore_mock_selftests in selftests/intel_uncore.c so that they will be
1238  * scanned for obvious mistakes or typos by the selftests.
1239  */
1240 
1241 static const struct intel_forcewake_range __gen6_fw_ranges[] = {
1242 	GEN_FW_RANGE(0x0, 0x3ffff, FORCEWAKE_RENDER),
1243 };
1244 
1245 static const struct intel_forcewake_range __vlv_fw_ranges[] = {
1246 	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
1247 	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
1248 	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
1249 	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
1250 	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
1251 	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
1252 	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
1253 };
1254 
1255 static const struct intel_forcewake_range __chv_fw_ranges[] = {
1256 	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
1257 	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1258 	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1259 	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1260 	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1261 	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1262 	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
1263 	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1264 	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
1265 	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
1266 	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
1267 	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1268 	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
1269 	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
1270 	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
1271 	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
1272 };
1273 
1274 static const struct intel_forcewake_range __gen9_fw_ranges[] = {
1275 	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_GT),
1276 	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
1277 	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1278 	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
1279 	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1280 	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
1281 	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1282 	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_GT),
1283 	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
1284 	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1285 	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
1286 	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1287 	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
1288 	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
1289 	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_GT),
1290 	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
1291 	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_GT),
1292 	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1293 	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
1294 	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
1295 	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_GT),
1296 	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
1297 	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_GT),
1298 	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
1299 	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT),
1300 	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
1301 	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_GT),
1302 	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
1303 	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_GT),
1304 	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
1305 	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_GT),
1306 	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
1307 };
1308 
1309 static const struct intel_forcewake_range __gen11_fw_ranges[] = {
1310 	GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
1311 	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1312 	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
1313 	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1314 	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
1315 	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1316 	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
1317 	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1318 	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
1319 	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1320 	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
1321 	GEN_FW_RANGE(0x8800, 0x8bff, 0),
1322 	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
1323 	GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_GT),
1324 	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1325 	GEN_FW_RANGE(0x9560, 0x95ff, 0),
1326 	GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_GT),
1327 	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
1328 	GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_GT),
1329 	GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
1330 	GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_GT),
1331 	GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
1332 	GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_GT),
1333 	GEN_FW_RANGE(0x24000, 0x2407f, 0),
1334 	GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_GT),
1335 	GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER),
1336 	GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_GT),
1337 	GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER),
1338 	GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_GT),
1339 	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1340 	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
1341 	GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
1342 	GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0),
1343 	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
1344 	GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
1345 };
1346 
1347 static const struct intel_forcewake_range __gen12_fw_ranges[] = {
1348 	GEN_FW_RANGE(0x0, 0x1fff, 0), /*
1349 		0x0   -  0xaff: reserved
1350 		0xb00 - 0x1fff: always on */
1351 	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1352 	GEN_FW_RANGE(0x2700, 0x27ff, FORCEWAKE_GT),
1353 	GEN_FW_RANGE(0x2800, 0x2aff, FORCEWAKE_RENDER),
1354 	GEN_FW_RANGE(0x2b00, 0x2fff, FORCEWAKE_GT),
1355 	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1356 	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
1357 		0x4000 - 0x48ff: gt
1358 		0x4900 - 0x51ff: reserved */
1359 	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
1360 		0x5200 - 0x53ff: render
1361 		0x5400 - 0x54ff: reserved
1362 		0x5500 - 0x7fff: render */
1363 	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
1364 	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1365 	GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
1366 		0x8160 - 0x817f: reserved
1367 		0x8180 - 0x81ff: always on */
1368 	GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
1369 	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1370 	GEN_FW_RANGE(0x8500, 0x94cf, FORCEWAKE_GT), /*
1371 		0x8500 - 0x87ff: gt
1372 		0x8800 - 0x8fff: reserved
1373 		0x9000 - 0x947f: gt
1374 		0x9480 - 0x94cf: reserved */
1375 	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1376 	GEN_FW_RANGE(0x9560, 0x97ff, 0), /*
1377 		0x9560 - 0x95ff: always on
1378 		0x9600 - 0x97ff: reserved */
1379 	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
1380 	GEN_FW_RANGE(0xb000, 0xb3ff, FORCEWAKE_RENDER),
1381 	GEN_FW_RANGE(0xb400, 0xcfff, FORCEWAKE_GT), /*
1382 		0xb400 - 0xbf7f: gt
1383 		0xb480 - 0xbfff: reserved
1384 		0xc000 - 0xcfff: gt */
1385 	GEN_FW_RANGE(0xd000, 0xd7ff, 0),
1386 	GEN_FW_RANGE(0xd800, 0xd8ff, FORCEWAKE_RENDER),
1387 	GEN_FW_RANGE(0xd900, 0xdbff, FORCEWAKE_GT),
1388 	GEN_FW_RANGE(0xdc00, 0xefff, FORCEWAKE_RENDER), /*
1389 		0xdc00 - 0xddff: render
1390 		0xde00 - 0xde7f: reserved
1391 		0xde80 - 0xe8ff: render
1392 		0xe900 - 0xefff: reserved */
1393 	GEN_FW_RANGE(0xf000, 0x147ff, FORCEWAKE_GT), /*
1394 		 0xf000 - 0xffff: gt
1395 		0x10000 - 0x147ff: reserved */
1396 	GEN_FW_RANGE(0x14800, 0x1ffff, FORCEWAKE_RENDER), /*
1397 		0x14800 - 0x14fff: render
1398 		0x15000 - 0x16dff: reserved
1399 		0x16e00 - 0x1bfff: render
1400 		0x1c000 - 0x1ffff: reserved */
1401 	GEN_FW_RANGE(0x20000, 0x20fff, FORCEWAKE_MEDIA_VDBOX0),
1402 	GEN_FW_RANGE(0x21000, 0x21fff, FORCEWAKE_MEDIA_VDBOX2),
1403 	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
1404 	GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
1405 		0x24000 - 0x2407f: always on
1406 		0x24080 - 0x2417f: reserved */
1407 	GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
1408 		0x24180 - 0x241ff: gt
1409 		0x24200 - 0x249ff: reserved */
1410 	GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
1411 		0x24a00 - 0x24a7f: render
1412 		0x24a80 - 0x251ff: reserved */
1413 	GEN_FW_RANGE(0x25200, 0x255ff, FORCEWAKE_GT), /*
1414 		0x25200 - 0x252ff: gt
1415 		0x25300 - 0x255ff: reserved */
1416 	GEN_FW_RANGE(0x25600, 0x2567f, FORCEWAKE_MEDIA_VDBOX0),
1417 	GEN_FW_RANGE(0x25680, 0x259ff, FORCEWAKE_MEDIA_VDBOX2), /*
1418 		0x25680 - 0x256ff: VD2
1419 		0x25700 - 0x259ff: reserved */
1420 	GEN_FW_RANGE(0x25a00, 0x25a7f, FORCEWAKE_MEDIA_VDBOX0),
1421 	GEN_FW_RANGE(0x25a80, 0x2ffff, FORCEWAKE_MEDIA_VDBOX2), /*
1422 		0x25a80 - 0x25aff: VD2
1423 		0x25b00 - 0x2ffff: reserved */
1424 	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
1425 	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1426 	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
1427 		0x1c0000 - 0x1c2bff: VD0
1428 		0x1c2c00 - 0x1c2cff: reserved
1429 		0x1c2d00 - 0x1c2dff: VD0
1430 		0x1c2e00 - 0x1c3eff: reserved
1431 		0x1c3f00 - 0x1c3fff: VD0 */
1432 	GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
1433 	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
1434 		0x1c8000 - 0x1ca0ff: VE0
1435 		0x1ca100 - 0x1cbeff: reserved
1436 		0x1cbf00 - 0x1cbfff: VE0 */
1437 	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
1438 		0x1cc000 - 0x1ccfff: VD0
1439 		0x1cd000 - 0x1cffff: reserved */
1440 	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
1441 		0x1d0000 - 0x1d2bff: VD2
1442 		0x1d2c00 - 0x1d2cff: reserved
1443 		0x1d2d00 - 0x1d2dff: VD2
1444 		0x1d2e00 - 0x1d3eff: reserved
1445 		0x1d3f00 - 0x1d3fff: VD2 */
1446 };
1447 
1448 static const struct intel_forcewake_range __dg2_fw_ranges[] = {
1449 	GEN_FW_RANGE(0x0, 0x1fff, 0), /*
1450 		  0x0 -  0xaff: reserved
1451 		0xb00 - 0x1fff: always on */
1452 	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1453 	GEN_FW_RANGE(0x2700, 0x4aff, FORCEWAKE_GT),
1454 	GEN_FW_RANGE(0x4b00, 0x51ff, 0), /*
1455 		0x4b00 - 0x4fff: reserved
1456 		0x5000 - 0x51ff: always on */
1457 	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1458 	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
1459 	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1460 	GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
1461 		0x8160 - 0x817f: reserved
1462 		0x8180 - 0x81ff: always on */
1463 	GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
1464 	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1465 	GEN_FW_RANGE(0x8500, 0x8cff, FORCEWAKE_GT), /*
1466 		0x8500 - 0x87ff: gt
1467 		0x8800 - 0x8c7f: reserved
1468 		0x8c80 - 0x8cff: gt (DG2 only) */
1469 	GEN_FW_RANGE(0x8d00, 0x8fff, FORCEWAKE_RENDER), /*
1470 		0x8d00 - 0x8dff: render (DG2 only)
1471 		0x8e00 - 0x8fff: reserved */
1472 	GEN_FW_RANGE(0x9000, 0x94cf, FORCEWAKE_GT), /*
1473 		0x9000 - 0x947f: gt
1474 		0x9480 - 0x94cf: reserved */
1475 	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1476 	GEN_FW_RANGE(0x9560, 0x967f, 0), /*
1477 		0x9560 - 0x95ff: always on
1478 		0x9600 - 0x967f: reserved */
1479 	GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /*
1480 		0x9680 - 0x96ff: render
1481 		0x9700 - 0x97ff: reserved */
1482 	GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*
1483 		0x9800 - 0xb4ff: gt
1484 		0xb500 - 0xbfff: reserved
1485 		0xc000 - 0xcfff: gt */
1486 	GEN_FW_RANGE(0xd000, 0xd7ff, 0),
1487 	GEN_FW_RANGE(0xd800, 0xd87f, FORCEWAKE_RENDER),
1488 	GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT),
1489 	GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
1490 	GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*
1491 		0xdd00 - 0xddff: gt
1492 		0xde00 - 0xde7f: reserved */
1493 	GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*
1494 		0xde80 - 0xdfff: render
1495 		0xe000 - 0xe0ff: reserved
1496 		0xe100 - 0xe8ff: render */
1497 	GEN_FW_RANGE(0xe900, 0xffff, FORCEWAKE_GT), /*
1498 		0xe900 - 0xe9ff: gt
1499 		0xea00 - 0xefff: reserved
1500 		0xf000 - 0xffff: gt */
1501 	GEN_FW_RANGE(0x10000, 0x12fff, 0), /*
1502 		0x10000 - 0x11fff: reserved
1503 		0x12000 - 0x127ff: always on
1504 		0x12800 - 0x12fff: reserved */
1505 	GEN_FW_RANGE(0x13000, 0x131ff, FORCEWAKE_MEDIA_VDBOX0),
1506 	GEN_FW_RANGE(0x13200, 0x147ff, FORCEWAKE_MEDIA_VDBOX2), /*
1507 		0x13200 - 0x133ff: VD2 (DG2 only)
1508 		0x13400 - 0x147ff: reserved */
1509 	GEN_FW_RANGE(0x14800, 0x14fff, FORCEWAKE_RENDER),
1510 	GEN_FW_RANGE(0x15000, 0x16dff, FORCEWAKE_GT), /*
1511 		0x15000 - 0x15fff: gt (DG2 only)
1512 		0x16000 - 0x16dff: reserved */
1513 	GEN_FW_RANGE(0x16e00, 0x21fff, FORCEWAKE_RENDER), /*
1514 		0x16e00 - 0x1ffff: render
1515 		0x20000 - 0x21fff: reserved */
1516 	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
1517 	GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
1518 		0x24000 - 0x2407f: always on
1519 		0x24080 - 0x2417f: reserved */
1520 	GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
1521 		0x24180 - 0x241ff: gt
1522 		0x24200 - 0x249ff: reserved */
1523 	GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
1524 		0x24a00 - 0x24a7f: render
1525 		0x24a80 - 0x251ff: reserved */
1526 	GEN_FW_RANGE(0x25200, 0x25fff, FORCEWAKE_GT), /*
1527 		0x25200 - 0x252ff: gt
1528 		0x25300 - 0x25fff: reserved */
1529 	GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /*
1530 		0x26000 - 0x27fff: render
1531 		0x28000 - 0x29fff: reserved
1532 		0x2a000 - 0x2ffff: undocumented */
1533 	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
1534 	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1535 	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
1536 		0x1c0000 - 0x1c2bff: VD0
1537 		0x1c2c00 - 0x1c2cff: reserved
1538 		0x1c2d00 - 0x1c2dff: VD0
1539 		0x1c2e00 - 0x1c3eff: VD0
1540 		0x1c3f00 - 0x1c3fff: VD0 */
1541 	GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), /*
1542 		0x1c4000 - 0x1c6bff: VD1
1543 		0x1c6c00 - 0x1c6cff: reserved
1544 		0x1c6d00 - 0x1c6dff: VD1
1545 		0x1c6e00 - 0x1c7fff: reserved */
1546 	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
1547 		0x1c8000 - 0x1ca0ff: VE0
1548 		0x1ca100 - 0x1cbfff: reserved */
1549 	GEN_FW_RANGE(0x1cc000, 0x1ccfff, FORCEWAKE_MEDIA_VDBOX0),
1550 	GEN_FW_RANGE(0x1cd000, 0x1cdfff, FORCEWAKE_MEDIA_VDBOX2),
1551 	GEN_FW_RANGE(0x1ce000, 0x1cefff, FORCEWAKE_MEDIA_VDBOX4),
1552 	GEN_FW_RANGE(0x1cf000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX6),
1553 	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
1554 		0x1d0000 - 0x1d2bff: VD2
1555 		0x1d2c00 - 0x1d2cff: reserved
1556 		0x1d2d00 - 0x1d2dff: VD2
1557 		0x1d2e00 - 0x1d3dff: VD2
1558 		0x1d3e00 - 0x1d3eff: reserved
1559 		0x1d3f00 - 0x1d3fff: VD2 */
1560 	GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), /*
1561 		0x1d4000 - 0x1d6bff: VD3
1562 		0x1d6c00 - 0x1d6cff: reserved
1563 		0x1d6d00 - 0x1d6dff: VD3
1564 		0x1d6e00 - 0x1d7fff: reserved */
1565 	GEN_FW_RANGE(0x1d8000, 0x1dffff, FORCEWAKE_MEDIA_VEBOX1), /*
1566 		0x1d8000 - 0x1da0ff: VE1
1567 		0x1da100 - 0x1dffff: reserved */
1568 	GEN_FW_RANGE(0x1e0000, 0x1e3fff, FORCEWAKE_MEDIA_VDBOX4), /*
1569 		0x1e0000 - 0x1e2bff: VD4
1570 		0x1e2c00 - 0x1e2cff: reserved
1571 		0x1e2d00 - 0x1e2dff: VD4
1572 		0x1e2e00 - 0x1e3eff: reserved
1573 		0x1e3f00 - 0x1e3fff: VD4 */
1574 	GEN_FW_RANGE(0x1e4000, 0x1e7fff, FORCEWAKE_MEDIA_VDBOX5), /*
1575 		0x1e4000 - 0x1e6bff: VD5
1576 		0x1e6c00 - 0x1e6cff: reserved
1577 		0x1e6d00 - 0x1e6dff: VD5
1578 		0x1e6e00 - 0x1e7fff: reserved */
1579 	GEN_FW_RANGE(0x1e8000, 0x1effff, FORCEWAKE_MEDIA_VEBOX2), /*
1580 		0x1e8000 - 0x1ea0ff: VE2
1581 		0x1ea100 - 0x1effff: reserved */
1582 	GEN_FW_RANGE(0x1f0000, 0x1f3fff, FORCEWAKE_MEDIA_VDBOX6), /*
1583 		0x1f0000 - 0x1f2bff: VD6
1584 		0x1f2c00 - 0x1f2cff: reserved
1585 		0x1f2d00 - 0x1f2dff: VD6
1586 		0x1f2e00 - 0x1f3eff: reserved
1587 		0x1f3f00 - 0x1f3fff: VD6 */
1588 	GEN_FW_RANGE(0x1f4000, 0x1f7fff, FORCEWAKE_MEDIA_VDBOX7), /*
1589 		0x1f4000 - 0x1f6bff: VD7
1590 		0x1f6c00 - 0x1f6cff: reserved
1591 		0x1f6d00 - 0x1f6dff: VD7
1592 		0x1f6e00 - 0x1f7fff: reserved */
1593 	GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3),
1594 };
1595 
1596 static const struct intel_forcewake_range __mtl_fw_ranges[] = {
1597 	GEN_FW_RANGE(0x0, 0xaff, 0),
1598 	GEN_FW_RANGE(0xb00, 0xbff, FORCEWAKE_GT),
1599 	GEN_FW_RANGE(0xc00, 0xfff, 0),
1600 	GEN_FW_RANGE(0x1000, 0x1fff, FORCEWAKE_GT),
1601 	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1602 	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
1603 	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1604 	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
1605 		0x4000 - 0x48ff: render
1606 		0x4900 - 0x51ff: reserved */
1607 	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
1608 		0x5200 - 0x53ff: render
1609 		0x5400 - 0x54ff: reserved
1610 		0x5500 - 0x7fff: render */
1611 	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
1612 	GEN_FW_RANGE(0x8140, 0x817f, FORCEWAKE_RENDER), /*
1613 		0x8140 - 0x815f: render
1614 		0x8160 - 0x817f: reserved */
1615 	GEN_FW_RANGE(0x8180, 0x81ff, 0),
1616 	GEN_FW_RANGE(0x8200, 0x94cf, FORCEWAKE_GT), /*
1617 		0x8200 - 0x87ff: gt
1618 		0x8800 - 0x8dff: reserved
1619 		0x8e00 - 0x8f7f: gt
1620 		0x8f80 - 0x8fff: reserved
1621 		0x9000 - 0x947f: gt
1622 		0x9480 - 0x94cf: reserved */
1623 	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1624 	GEN_FW_RANGE(0x9560, 0x967f, 0), /*
1625 		0x9560 - 0x95ff: always on
1626 		0x9600 - 0x967f: reserved */
1627 	GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /*
1628 		0x9680 - 0x96ff: render
1629 		0x9700 - 0x97ff: reserved */
1630 	GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*
1631 		0x9800 - 0xb4ff: gt
1632 		0xb500 - 0xbfff: reserved
1633 		0xc000 - 0xcfff: gt */
1634 	GEN_FW_RANGE(0xd000, 0xd7ff, 0), /*
1635 		0xd000 - 0xd3ff: always on
1636 		0xd400 - 0xd7ff: reserved */
1637 	GEN_FW_RANGE(0xd800, 0xd87f, FORCEWAKE_RENDER),
1638 	GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT),
1639 	GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
1640 	GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*
1641 		0xdd00 - 0xddff: gt
1642 		0xde00 - 0xde7f: reserved */
1643 	GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*
1644 		0xde80 - 0xdfff: render
1645 		0xe000 - 0xe0ff: reserved
1646 		0xe100 - 0xe8ff: render */
1647 	GEN_FW_RANGE(0xe900, 0xe9ff, FORCEWAKE_GT),
1648 	GEN_FW_RANGE(0xea00, 0x147ff, 0), /*
1649 		 0xea00 - 0x11fff: reserved
1650 		0x12000 - 0x127ff: always on
1651 		0x12800 - 0x147ff: reserved */
1652 	GEN_FW_RANGE(0x14800, 0x19fff, FORCEWAKE_GT), /*
1653 		0x14800 - 0x153ff: gt
1654 		0x15400 - 0x19fff: reserved */
1655 	GEN_FW_RANGE(0x1a000, 0x21fff, FORCEWAKE_RENDER), /*
1656 		0x1a000 - 0x1bfff: render
1657 		0x1c000 - 0x21fff: reserved */
1658 	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
1659 	GEN_FW_RANGE(0x24000, 0x2ffff, 0), /*
1660 		0x24000 - 0x2407f: always on
1661 		0x24080 - 0x2ffff: reserved */
1662 	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
1663 	GEN_FW_RANGE(0x40000, 0x1901ef, 0),
1664 	GEN_FW_RANGE(0x1901f0, 0x1901f3, FORCEWAKE_GT)
1665 		/* FIXME: WA to wake GT while triggering H2G */
1666 };
1667 
1668 /*
1669  * Note that the register ranges here are the final offsets after
1670  * translation of the GSI block to the 0x380000 offset.
1671  *
1672  * NOTE:  There are a couple MCR ranges near the bottom of this table
1673  * that need to power up either VD0 or VD2 depending on which replicated
1674  * instance of the register we're trying to access.  Our forcewake logic
1675  * at the moment doesn't have a good way to take steering into consideration,
1676  * and the driver doesn't even access any registers in those ranges today,
1677  * so for now we just mark those ranges as FORCEWAKE_ALL.  That will ensure
1678  * proper operation if we do start using the ranges in the future, and we
1679  * can determine at that time whether it's worth adding extra complexity to
1680  * the forcewake handling to take steering into consideration.
1681  */
1682 static const struct intel_forcewake_range __xelpmp_fw_ranges[] = {
1683 	GEN_FW_RANGE(0x0, 0x115fff, 0), /* render GT range */
1684 	GEN_FW_RANGE(0x116000, 0x11ffff, FORCEWAKE_GSC), /*
1685 		0x116000 - 0x117fff: gsc
1686 		0x118000 - 0x119fff: reserved
1687 		0x11a000 - 0x11efff: gsc
1688 		0x11f000 - 0x11ffff: reserved */
1689 	GEN_FW_RANGE(0x120000, 0x1bffff, 0), /* non-GT range */
1690 	GEN_FW_RANGE(0x1c0000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX0), /*
1691 		0x1c0000 - 0x1c3dff: VD0
1692 		0x1c3e00 - 0x1c3eff: reserved
1693 		0x1c3f00 - 0x1c3fff: VD0
1694 		0x1c4000 - 0x1c7fff: reserved */
1695 	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
1696 		0x1c8000 - 0x1ca0ff: VE0
1697 		0x1ca100 - 0x1cbfff: reserved */
1698 	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
1699 		0x1cc000 - 0x1cdfff: VD0
1700 		0x1ce000 - 0x1cffff: reserved */
1701 	GEN_FW_RANGE(0x1d0000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX2), /*
1702 		0x1d0000 - 0x1d3dff: VD2
1703 		0x1d3e00 - 0x1d3eff: reserved
1704 		0x1d4000 - 0x1d7fff: VD2 */
1705 	GEN_FW_RANGE(0x1d8000, 0x1da0ff, FORCEWAKE_MEDIA_VEBOX1),
1706 	GEN_FW_RANGE(0x1da100, 0x380aff, 0), /*
1707 		0x1da100 - 0x23ffff: reserved
1708 		0x240000 - 0x37ffff: non-GT range
1709 		0x380000 - 0x380aff: reserved */
1710 	GEN_FW_RANGE(0x380b00, 0x380bff, FORCEWAKE_GT),
1711 	GEN_FW_RANGE(0x380c00, 0x380fff, 0),
1712 	GEN_FW_RANGE(0x381000, 0x38817f, FORCEWAKE_GT), /*
1713 		0x381000 - 0x381fff: gt
1714 		0x382000 - 0x383fff: reserved
1715 		0x384000 - 0x384aff: gt
1716 		0x384b00 - 0x3851ff: reserved
1717 		0x385200 - 0x3871ff: gt
1718 		0x387200 - 0x387fff: reserved
1719 		0x388000 - 0x38813f: gt
1720 		0x388140 - 0x38817f: reserved */
1721 	GEN_FW_RANGE(0x388180, 0x3882ff, 0), /*
1722 		0x388180 - 0x3881ff: always on
1723 		0x388200 - 0x3882ff: reserved */
1724 	GEN_FW_RANGE(0x388300, 0x38955f, FORCEWAKE_GT), /*
1725 		0x388300 - 0x38887f: gt
1726 		0x388880 - 0x388fff: reserved
1727 		0x389000 - 0x38947f: gt
1728 		0x389480 - 0x38955f: reserved */
1729 	GEN_FW_RANGE(0x389560, 0x389fff, 0), /*
1730 		0x389560 - 0x3895ff: always on
1731 		0x389600 - 0x389fff: reserved */
1732 	GEN_FW_RANGE(0x38a000, 0x38cfff, FORCEWAKE_GT), /*
1733 		0x38a000 - 0x38afff: gt
1734 		0x38b000 - 0x38bfff: reserved
1735 		0x38c000 - 0x38cfff: gt */
1736 	GEN_FW_RANGE(0x38d000, 0x38d11f, 0),
1737 	GEN_FW_RANGE(0x38d120, 0x391fff, FORCEWAKE_GT), /*
1738 		0x38d120 - 0x38dfff: gt
1739 		0x38e000 - 0x38efff: reserved
1740 		0x38f000 - 0x38ffff: gt
1741 		0x390000 - 0x391fff: reserved */
1742 	GEN_FW_RANGE(0x392000, 0x392fff, 0), /*
1743 		0x392000 - 0x3927ff: always on
1744 		0x392800 - 0x392fff: reserved */
1745 	GEN_FW_RANGE(0x393000, 0x3931ff, FORCEWAKE_GT),
1746 	GEN_FW_RANGE(0x393200, 0x39323f, FORCEWAKE_ALL), /* instance-based, see note above */
1747 	GEN_FW_RANGE(0x393240, 0x3933ff, FORCEWAKE_GT),
1748 	GEN_FW_RANGE(0x393400, 0x3934ff, FORCEWAKE_ALL), /* instance-based, see note above */
1749 	GEN_FW_RANGE(0x393500, 0x393c7f, 0), /*
1750 		0x393500 - 0x393bff: reserved
1751 		0x393c00 - 0x393c7f: always on */
1752 	GEN_FW_RANGE(0x393c80, 0x393dff, FORCEWAKE_GT),
1753 };
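/*
 * Worked example of the GSI translation described in the note above this
 * table (an editorial illustration, not upstream code): with the media GT's
 * GSI block remapped to the 0x380000 offset, a GT register defined at
 * offset 0xb00 ends up being accessed at
 *
 *	0x380000 + 0xb00 = 0x380b00
 *
 * which is why the FORCEWAKE_GT entries in this table only begin at
 * 0x380b00, while everything below 0x380000 covers the media engines and
 * the non-GT ranges.
 */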
1754 
1755 static void
1756 ilk_dummy_write(struct intel_uncore *uncore)
1757 {
1758 	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
1759 	 * the chip from rc6 before touching it for real. MI_MODE is masked,
1760 	 * hence harmless to write 0 into. */
1761 	__raw_uncore_write32(uncore, RING_MI_MODE(RENDER_RING_BASE), 0);
1762 }
1763 
1764 static void
1765 __unclaimed_reg_debug(struct intel_uncore *uncore,
1766 		      const i915_reg_t reg,
1767 		      const bool read)
1768 {
1769 	if (drm_WARN(&uncore->i915->drm,
1770 		     check_for_unclaimed_mmio(uncore),
1771 		     "Unclaimed %s register 0x%x\n",
1772 		     read ? "read from" : "write to",
1773 		     i915_mmio_reg_offset(reg)))
1774 		/* Only report the first N failures */
1775 		uncore->i915->params.mmio_debug--;
1776 }
1777 
1778 static void
1779 __unclaimed_previous_reg_debug(struct intel_uncore *uncore,
1780 			       const i915_reg_t reg,
1781 			       const bool read)
1782 {
1783 	if (check_for_unclaimed_mmio(uncore))
1784 		drm_dbg(&uncore->i915->drm,
1785 			"Unclaimed access detected before %s register 0x%x\n",
1786 			read ? "read from" : "write to",
1787 			i915_mmio_reg_offset(reg));
1788 }
1789 
1790 static inline bool __must_check
1791 unclaimed_reg_debug_header(struct intel_uncore *uncore,
1792 			   const i915_reg_t reg, const bool read)
1793 {
1794 	if (likely(!uncore->i915->params.mmio_debug) || !uncore->debug)
1795 		return false;
1796 
1797 	/* interrupts are disabled and re-enabled around uncore->lock usage */
1798 	lockdep_assert_held(&uncore->lock);
1799 
1800 	spin_lock(&uncore->debug->lock);
1801 	__unclaimed_previous_reg_debug(uncore, reg, read);
1802 
1803 	return true;
1804 }
1805 
1806 static inline void
1807 unclaimed_reg_debug_footer(struct intel_uncore *uncore,
1808 			   const i915_reg_t reg, const bool read)
1809 {
1810 	/* interrupts are disabled and re-enabled around uncore->lock usage */
1811 	lockdep_assert_held(&uncore->lock);
1812 
1813 	__unclaimed_reg_debug(uncore, reg, read);
1814 	spin_unlock(&uncore->debug->lock);
1815 }
1816 
1817 #define __vgpu_read(x) \
1818 static u##x \
1819 vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1820 	u##x val = __raw_uncore_read##x(uncore, reg); \
1821 	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1822 	return val; \
1823 }
1824 __vgpu_read(8)
1825 __vgpu_read(16)
1826 __vgpu_read(32)
1827 __vgpu_read(64)
1828 
1829 #define GEN2_READ_HEADER(x) \
1830 	u##x val = 0; \
1831 	assert_rpm_wakelock_held(uncore->rpm);
1832 
1833 #define GEN2_READ_FOOTER \
1834 	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1835 	return val
1836 
1837 #define __gen2_read(x) \
1838 static u##x \
1839 gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1840 	GEN2_READ_HEADER(x); \
1841 	val = __raw_uncore_read##x(uncore, reg); \
1842 	GEN2_READ_FOOTER; \
1843 }
1844 
1845 #define __gen5_read(x) \
1846 static u##x \
1847 gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1848 	GEN2_READ_HEADER(x); \
1849 	ilk_dummy_write(uncore); \
1850 	val = __raw_uncore_read##x(uncore, reg); \
1851 	GEN2_READ_FOOTER; \
1852 }
1853 
1854 __gen5_read(8)
1855 __gen5_read(16)
1856 __gen5_read(32)
1857 __gen5_read(64)
1858 __gen2_read(8)
1859 __gen2_read(16)
1860 __gen2_read(32)
1861 __gen2_read(64)
1862 
1863 #undef __gen5_read
1864 #undef __gen2_read
1865 
1866 #undef GEN2_READ_FOOTER
1867 #undef GEN2_READ_HEADER
1868 
1869 #define GEN6_READ_HEADER(x) \
1870 	u32 offset = i915_mmio_reg_offset(reg); \
1871 	unsigned long irqflags; \
1872 	bool unclaimed_reg_debug; \
1873 	u##x val = 0; \
1874 	assert_rpm_wakelock_held(uncore->rpm); \
1875 	spin_lock_irqsave(&uncore->lock, irqflags); \
1876 	unclaimed_reg_debug = unclaimed_reg_debug_header(uncore, reg, true)
1877 
1878 #define GEN6_READ_FOOTER \
1879 	if (unclaimed_reg_debug) \
1880 		unclaimed_reg_debug_footer(uncore, reg, true);	\
1881 	spin_unlock_irqrestore(&uncore->lock, irqflags); \
1882 	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1883 	return val
1884 
1885 static noinline void ___force_wake_auto(struct intel_uncore *uncore,
1886 					enum forcewake_domains fw_domains)
1887 {
1888 	struct intel_uncore_forcewake_domain *domain;
1889 	unsigned int tmp;
1890 
1891 	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
1892 
1893 	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
1894 		fw_domain_arm_timer(domain);
1895 
1896 	fw_domains_get(uncore, fw_domains);
1897 }
1898 
1899 static inline void __force_wake_auto(struct intel_uncore *uncore,
1900 				     enum forcewake_domains fw_domains)
1901 {
1902 	GEM_BUG_ON(!fw_domains);
1903 
1904 	/* Turn on all requested but inactive supported forcewake domains. */
1905 	fw_domains &= uncore->fw_domains;
1906 	fw_domains &= ~uncore->fw_domains_active;
1907 
1908 	if (fw_domains)
1909 		___force_wake_auto(uncore, fw_domains);
1910 }
1911 
1912 #define __gen_fwtable_read(x) \
1913 static u##x \
1914 fwtable_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) \
1915 { \
1916 	enum forcewake_domains fw_engine; \
1917 	GEN6_READ_HEADER(x); \
1918 	fw_engine = __fwtable_reg_read_fw_domains(uncore, offset); \
1919 	if (fw_engine) \
1920 		__force_wake_auto(uncore, fw_engine); \
1921 	val = __raw_uncore_read##x(uncore, reg); \
1922 	GEN6_READ_FOOTER; \
1923 }
1924 
1925 static enum forcewake_domains
1926 fwtable_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) {
1927 	return __fwtable_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg));
1928 }
1929 
1930 __gen_fwtable_read(8)
1931 __gen_fwtable_read(16)
1932 __gen_fwtable_read(32)
1933 __gen_fwtable_read(64)
1934 
1935 #undef __gen_fwtable_read
1936 #undef GEN6_READ_FOOTER
1937 #undef GEN6_READ_HEADER
1938 
1939 #define GEN2_WRITE_HEADER \
1940 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1941 	assert_rpm_wakelock_held(uncore->rpm); \
1942 
1943 #define GEN2_WRITE_FOOTER
1944 
1945 #define __gen2_write(x) \
1946 static void \
1947 gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1948 	GEN2_WRITE_HEADER; \
1949 	__raw_uncore_write##x(uncore, reg, val); \
1950 	GEN2_WRITE_FOOTER; \
1951 }
1952 
1953 #define __gen5_write(x) \
1954 static void \
1955 gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1956 	GEN2_WRITE_HEADER; \
1957 	ilk_dummy_write(uncore); \
1958 	__raw_uncore_write##x(uncore, reg, val); \
1959 	GEN2_WRITE_FOOTER; \
1960 }
1961 
1962 __gen5_write(8)
1963 __gen5_write(16)
1964 __gen5_write(32)
1965 __gen2_write(8)
1966 __gen2_write(16)
1967 __gen2_write(32)
1968 
1969 #undef __gen5_write
1970 #undef __gen2_write
1971 
1972 #undef GEN2_WRITE_FOOTER
1973 #undef GEN2_WRITE_HEADER
1974 
1975 #define GEN6_WRITE_HEADER \
1976 	u32 offset = i915_mmio_reg_offset(reg); \
1977 	unsigned long irqflags; \
1978 	bool unclaimed_reg_debug; \
1979 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1980 	assert_rpm_wakelock_held(uncore->rpm); \
1981 	spin_lock_irqsave(&uncore->lock, irqflags); \
1982 	unclaimed_reg_debug = unclaimed_reg_debug_header(uncore, reg, false)
1983 
1984 #define GEN6_WRITE_FOOTER \
1985 	if (unclaimed_reg_debug) \
1986 		unclaimed_reg_debug_footer(uncore, reg, false); \
1987 	spin_unlock_irqrestore(&uncore->lock, irqflags)
1988 
1989 #define __gen6_write(x) \
1990 static void \
1991 gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1992 	GEN6_WRITE_HEADER; \
1993 	if (NEEDS_FORCE_WAKE(offset)) \
1994 		__gen6_gt_wait_for_fifo(uncore); \
1995 	__raw_uncore_write##x(uncore, reg, val); \
1996 	GEN6_WRITE_FOOTER; \
1997 }
1998 __gen6_write(8)
1999 __gen6_write(16)
2000 __gen6_write(32)
2001 
2002 #define __gen_fwtable_write(x) \
2003 static void \
2004 fwtable_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
2005 	enum forcewake_domains fw_engine; \
2006 	GEN6_WRITE_HEADER; \
2007 	fw_engine = __fwtable_reg_write_fw_domains(uncore, offset); \
2008 	if (fw_engine) \
2009 		__force_wake_auto(uncore, fw_engine); \
2010 	__raw_uncore_write##x(uncore, reg, val); \
2011 	GEN6_WRITE_FOOTER; \
2012 }
2013 
2014 static enum forcewake_domains
2015 fwtable_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
2016 {
2017 	return __fwtable_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg));
2018 }
2019 
2020 __gen_fwtable_write(8)
2021 __gen_fwtable_write(16)
2022 __gen_fwtable_write(32)
2023 
2024 #undef __gen_fwtable_write
2025 #undef GEN6_WRITE_FOOTER
2026 #undef GEN6_WRITE_HEADER
2027 
2028 #define __vgpu_write(x) \
2029 static void \
2030 vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
2031 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
2032 	__raw_uncore_write##x(uncore, reg, val); \
2033 }
2034 __vgpu_write(8)
2035 __vgpu_write(16)
2036 __vgpu_write(32)
2037 
2038 #define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
2039 do { \
2040 	(uncore)->funcs.mmio_writeb = x##_write8; \
2041 	(uncore)->funcs.mmio_writew = x##_write16; \
2042 	(uncore)->funcs.mmio_writel = x##_write32; \
2043 } while (0)
2044 
2045 #define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
2046 do { \
2047 	(uncore)->funcs.mmio_readb = x##_read8; \
2048 	(uncore)->funcs.mmio_readw = x##_read16; \
2049 	(uncore)->funcs.mmio_readl = x##_read32; \
2050 	(uncore)->funcs.mmio_readq = x##_read64; \
2051 } while (0)
2052 
2053 #define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
2054 do { \
2055 	ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
2056 	(uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
2057 } while (0)
2058 
2059 #define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
2060 do { \
2061 	ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
2062 	(uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
2063 } while (0)
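/*
 * Illustrative expansion (editorial sketch, following directly from the
 * token pasting in the macros above): ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable)
 * is equivalent to
 *
 *	uncore->funcs.mmio_readb = fwtable_read8;
 *	uncore->funcs.mmio_readw = fwtable_read16;
 *	uncore->funcs.mmio_readl = fwtable_read32;
 *	uncore->funcs.mmio_readq = fwtable_read64;
 *	uncore->funcs.read_fw_domains = fwtable_reg_read_fw_domains;
 *
 * so the second argument must match the prefix of a generated accessor set.
 */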
2064 
2065 static int __fw_domain_init(struct intel_uncore *uncore,
2066 			    enum forcewake_domain_id domain_id,
2067 			    i915_reg_t reg_set,
2068 			    i915_reg_t reg_ack)
2069 {
2070 	struct intel_uncore_forcewake_domain *d;
2071 
2072 	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
2073 	GEM_BUG_ON(uncore->fw_domain[domain_id]);
2074 
2075 	if (i915_inject_probe_failure(uncore->i915))
2076 		return -ENOMEM;
2077 
2078 	d = kzalloc(sizeof(*d), GFP_KERNEL);
2079 	if (!d)
2080 		return -ENOMEM;
2081 
2082 	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set));
2083 	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack));
2084 
2085 	d->uncore = uncore;
2086 	d->wake_count = 0;
2087 	d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set) + uncore->gsi_offset;
2088 	d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack) + uncore->gsi_offset;
2089 
2090 	d->id = domain_id;
2091 
2092 	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
2093 	BUILD_BUG_ON(FORCEWAKE_GT != (1 << FW_DOMAIN_ID_GT));
2094 	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
2095 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
2096 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
2097 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
2098 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
2099 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX4 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX4));
2100 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX5 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX5));
2101 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX6 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX6));
2102 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX7 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX7));
2103 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
2104 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
2105 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX2));
2106 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX3));
2107 	BUILD_BUG_ON(FORCEWAKE_GSC != (1 << FW_DOMAIN_ID_GSC));
2108 
2109 	d->mask = BIT(domain_id);
2110 
2111 	hrtimer_setup(&d->timer, intel_uncore_fw_release_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2112 
2113 	uncore->fw_domains |= BIT(domain_id);
2114 
2115 	fw_domain_reset(d);
2116 
2117 	uncore->fw_domain[domain_id] = d;
2118 
2119 	return 0;
2120 }
2121 
2122 static void fw_domain_fini(struct intel_uncore *uncore,
2123 			   enum forcewake_domain_id domain_id)
2124 {
2125 	struct intel_uncore_forcewake_domain *d;
2126 
2127 	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
2128 
2129 	d = fetch_and_zero(&uncore->fw_domain[domain_id]);
2130 	if (!d)
2131 		return;
2132 
2133 	uncore->fw_domains &= ~BIT(domain_id);
2134 	drm_WARN_ON(&uncore->i915->drm, d->wake_count);
2135 	drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer));
2136 	kfree(d);
2137 }
2138 
2139 static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
2140 {
2141 	struct intel_uncore_forcewake_domain *d;
2142 	int tmp;
2143 
2144 	for_each_fw_domain(d, uncore, tmp)
2145 		fw_domain_fini(uncore, d->id);
2146 }
2147 
2148 static const struct intel_uncore_fw_get uncore_get_fallback = {
2149 	.force_wake_get = fw_domains_get_with_fallback
2150 };
2151 
2152 static const struct intel_uncore_fw_get uncore_get_normal = {
2153 	.force_wake_get = fw_domains_get_normal,
2154 };
2155 
2156 static const struct intel_uncore_fw_get uncore_get_thread_status = {
2157 	.force_wake_get = fw_domains_get_with_thread_status
2158 };
2159 
2160 static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
2161 {
2162 	struct drm_i915_private *i915 = uncore->i915;
2163 	int ret = 0;
2164 
2165 	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
2166 
2167 #define fw_domain_init(uncore__, id__, set__, ack__) \
2168 	(ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))
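	/*
	 * Editorial note: the GNU "?:" form above evaluates to the existing
	 * ret when it is already non-zero, so after the first failing
	 * __fw_domain_init() call the remaining fw_domain_init() invocations
	 * below become no-ops and the original error is preserved.
	 */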
2169 
2170 	if (GRAPHICS_VER(i915) >= 11) {
2171 		intel_engine_mask_t emask;
2172 		int i;
2173 
2174 		/* we'll prune the domains of missing engines later */
2175 		emask = uncore->gt->info.engine_mask;
2176 
2177 		uncore->fw_get_funcs = &uncore_get_fallback;
2178 		if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
2179 			fw_domain_init(uncore, FW_DOMAIN_ID_GT,
2180 				       FORCEWAKE_GT_GEN9,
2181 				       FORCEWAKE_ACK_GT_MTL);
2182 		else
2183 			fw_domain_init(uncore, FW_DOMAIN_ID_GT,
2184 				       FORCEWAKE_GT_GEN9,
2185 				       FORCEWAKE_ACK_GT_GEN9);
2186 
2187 		if (RCS_MASK(uncore->gt) || CCS_MASK(uncore->gt))
2188 			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2189 				       FORCEWAKE_RENDER_GEN9,
2190 				       FORCEWAKE_ACK_RENDER_GEN9);
2191 
2192 		for (i = 0; i < I915_MAX_VCS; i++) {
2193 			if (!__HAS_ENGINE(emask, _VCS(i)))
2194 				continue;
2195 
2196 			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
2197 				       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
2198 				       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
2199 		}
2200 		for (i = 0; i < I915_MAX_VECS; i++) {
2201 			if (!__HAS_ENGINE(emask, _VECS(i)))
2202 				continue;
2203 
2204 			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
2205 				       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
2206 				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
2207 		}
2208 
2209 		if (uncore->gt->type == GT_MEDIA)
2210 			fw_domain_init(uncore, FW_DOMAIN_ID_GSC,
2211 				       FORCEWAKE_REQ_GSC, FORCEWAKE_ACK_GSC);
2212 	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
2213 		uncore->fw_get_funcs = &uncore_get_fallback;
2214 		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2215 			       FORCEWAKE_RENDER_GEN9,
2216 			       FORCEWAKE_ACK_RENDER_GEN9);
2217 		fw_domain_init(uncore, FW_DOMAIN_ID_GT,
2218 			       FORCEWAKE_GT_GEN9,
2219 			       FORCEWAKE_ACK_GT_GEN9);
2220 		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
2221 			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
2222 	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
2223 		uncore->fw_get_funcs = &uncore_get_normal;
2224 		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2225 			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
2226 		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
2227 			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
2228 	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
2229 		uncore->fw_get_funcs = &uncore_get_thread_status;
2230 		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2231 			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
2232 	} else if (IS_IVYBRIDGE(i915)) {
2233 		u32 ecobus;
2234 
2235 		/* IVB configs may use multi-threaded forcewake */
2236 
2237 		/* A small trick here - if the bios hasn't configured
2238 		 * MT forcewake, and if the device is in RC6, then
2239 		 * force_wake_mt_get will not wake the device and the
2240 		 * ECOBUS read will return zero. Which will be
2241 		 * (correctly) interpreted by the test below as MT
2242 		 * forcewake being disabled.
2243 		 */
2244 		uncore->fw_get_funcs = &uncore_get_thread_status;
2245 
2246 		/* We need to init first for ECOBUS access and then
2247 		 * determine later if we want to reinit, in case MT access is
2248 		 * not working. At this stage we don't know which flavour this
2249 		 * ivb is, so it is better to also reset the gen6 fw registers
2250 		 * before the ecobus check.
2251 		 */
2252 
2253 		__raw_uncore_write32(uncore, FORCEWAKE, 0);
2254 		__raw_posting_read(uncore, ECOBUS);
2255 
2256 		ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2257 				       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
2258 		if (ret)
2259 			goto out;
2260 
2261 		spin_lock_irq(&uncore->lock);
2262 		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
2263 		ecobus = __raw_uncore_read32(uncore, ECOBUS);
2264 		fw_domains_put(uncore, FORCEWAKE_RENDER);
2265 		spin_unlock_irq(&uncore->lock);
2266 
2267 		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
2268 			drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n");
2269 			drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n");
2270 			fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
2271 			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2272 				       FORCEWAKE, FORCEWAKE_ACK);
2273 		}
2274 	} else if (GRAPHICS_VER(i915) == 6) {
2275 		uncore->fw_get_funcs = &uncore_get_thread_status;
2276 		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2277 			       FORCEWAKE, FORCEWAKE_ACK);
2278 	}
2279 
2280 #undef fw_domain_init
2281 
2282 	/* All future platforms are expected to require complex power gating */
2283 	drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0);
2284 
2285 out:
2286 	if (ret)
2287 		intel_uncore_fw_domains_fini(uncore);
2288 
2289 	return ret;
2290 }
2291 
2292 #define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
2293 { \
2294 	(uncore)->fw_domains_table = \
2295 			(struct intel_forcewake_range *)(d); \
2296 	(uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
2297 }
2298 
2299 #define ASSIGN_SHADOW_TABLE(uncore, d) \
2300 { \
2301 	(uncore)->shadowed_reg_table = d; \
2302 	(uncore)->shadowed_reg_table_entries = ARRAY_SIZE((d)); \
2303 }
2304 
2305 static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
2306 					 unsigned long action, void *data)
2307 {
2308 	struct intel_uncore *uncore = container_of(nb,
2309 			struct intel_uncore, pmic_bus_access_nb);
2310 
2311 	switch (action) {
2312 	case MBI_PMIC_BUS_ACCESS_BEGIN:
2313 		/*
2314 		 * forcewake all now to make sure that we don't need to do a
2315 		 * forcewake later which on systems where this notifier gets
2316 		 * called requires the punit to access the shared pmic i2c
2317 		 * bus, which will be busy after this notification, leading to:
2318 		 * "render: timed out waiting for forcewake ack request."
2319 		 * errors.
2320 		 *
2321 		 * The notifier is unregistered during intel_runtime_suspend(),
2322 		 * so it's ok to access the HW here without holding a RPM
2323 		 * wake reference -> disable wakeref asserts for the time of
2324 		 * the access.
2325 		 */
2326 		disable_rpm_wakeref_asserts(uncore->rpm);
2327 		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
2328 		enable_rpm_wakeref_asserts(uncore->rpm);
2329 		break;
2330 	case MBI_PMIC_BUS_ACCESS_END:
2331 		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
2332 		break;
2333 	}
2334 
2335 	return NOTIFY_OK;
2336 }
2337 
2338 static void uncore_unmap_mmio(struct drm_device *drm, void *regs)
2339 {
2340 	iounmap((void __iomem *)regs);
2341 }
2342 
2343 int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
2344 {
2345 	struct drm_i915_private *i915 = uncore->i915;
2346 	int mmio_size;
2347 
2348 	/*
2349 	 * Before gen4, the registers and the GTT are behind different BARs.
2350 	 * However, from gen4 onwards, the registers and the GTT are shared
2351 	 * in the same BAR, so we want to restrict this ioremap from
2352 	 * clobbering the GTT, which we want mapped with ioremap_wc instead. Fortunately,
2353 	 * the register BAR remains the same size for all the earlier
2354 	 * generations up to Ironlake.
2355 	 * For dgfx chips the register range is expanded to 4MB, and this larger
2356 	 * range is also used for integrated gpus beginning with Meteor Lake.
2357 	 */
2358 	if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
2359 		mmio_size = 4 * 1024 * 1024;
2360 	else if (GRAPHICS_VER(i915) >= 5)
2361 		mmio_size = 2 * 1024 * 1024;
2362 	else
2363 		mmio_size = 512 * 1024;
2364 
2365 	uncore->regs = ioremap(phys_addr, mmio_size);
2366 	if (uncore->regs == NULL) {
2367 		drm_err(&i915->drm, "failed to map registers\n");
2368 		return -EIO;
2369 	}
2370 
2371 	return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio,
2372 					(void __force *)uncore->regs);
2373 }
2374 
2375 void intel_uncore_init_early(struct intel_uncore *uncore,
2376 			     struct intel_gt *gt)
2377 {
2378 	spin_lock_init(&uncore->lock);
2379 	uncore->i915 = gt->i915;
2380 	uncore->gt = gt;
2381 	uncore->rpm = &gt->i915->runtime_pm;
2382 }
2383 
2384 static void uncore_raw_init(struct intel_uncore *uncore)
2385 {
2386 	GEM_BUG_ON(intel_uncore_has_forcewake(uncore));
2387 
2388 	if (intel_vgpu_active(uncore->i915)) {
2389 		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
2390 		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
2391 	} else if (GRAPHICS_VER(uncore->i915) == 5) {
2392 		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
2393 		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
2394 	} else {
2395 		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
2396 		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
2397 	}
2398 }
2399 
2400 static int uncore_media_forcewake_init(struct intel_uncore *uncore)
2401 {
2402 	struct drm_i915_private *i915 = uncore->i915;
2403 
2404 	if (MEDIA_VER(i915) >= 13) {
2405 		ASSIGN_FW_DOMAINS_TABLE(uncore, __xelpmp_fw_ranges);
2406 		ASSIGN_SHADOW_TABLE(uncore, xelpmp_shadowed_regs);
2407 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2408 	} else {
2409 		MISSING_CASE(MEDIA_VER(i915));
2410 		return -ENODEV;
2411 	}
2412 
2413 	return 0;
2414 }
2415 
2416 static int uncore_forcewake_init(struct intel_uncore *uncore)
2417 {
2418 	struct drm_i915_private *i915 = uncore->i915;
2419 	int ret;
2420 
2421 	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
2422 
2423 	ret = intel_uncore_fw_domains_init(uncore);
2424 	if (ret)
2425 		return ret;
2426 	forcewake_early_sanitize(uncore, 0);
2427 
2428 	ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
2429 
2430 	if (uncore->gt->type == GT_MEDIA)
2431 		return uncore_media_forcewake_init(uncore);
2432 
2433 	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) {
2434 		ASSIGN_FW_DOMAINS_TABLE(uncore, __mtl_fw_ranges);
2435 		ASSIGN_SHADOW_TABLE(uncore, mtl_shadowed_regs);
2436 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2437 	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
2438 		ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges);
2439 		ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs);
2440 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2441 	} else if (GRAPHICS_VER(i915) >= 12) {
2442 		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
2443 		ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
2444 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2445 	} else if (GRAPHICS_VER(i915) == 11) {
2446 		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
2447 		ASSIGN_SHADOW_TABLE(uncore, gen11_shadowed_regs);
2448 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2449 	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
2450 		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
2451 		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
2452 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2453 	} else if (IS_CHERRYVIEW(i915)) {
2454 		ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
2455 		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
2456 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2457 	} else if (GRAPHICS_VER(i915) == 8) {
2458 		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
2459 		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
2460 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2461 	} else if (IS_VALLEYVIEW(i915)) {
2462 		ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
2463 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
2464 	} else if (IS_GRAPHICS_VER(i915, 6, 7)) {
2465 		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
2466 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
2467 	}
2468 
2469 	uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
2470 	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
2471 
2472 	return 0;
2473 }
2474 
2475 static int sanity_check_mmio_access(struct intel_uncore *uncore)
2476 {
2477 	struct drm_i915_private *i915 = uncore->i915;
2478 
2479 	if (GRAPHICS_VER(i915) < 8)
2480 		return 0;
2481 
2482 	/*
2483 	 * Sanitycheck that MMIO access to the device is working properly.  If
2484 	 * the CPU is unable to communicate with a PCI device, BAR reads will
2485 	 * return 0xFFFFFFFF.  Let's make sure the device isn't in this state
2486 	 * before we start trying to access registers.
2487 	 *
2488 	 * We use the primary GT's forcewake register as our guinea pig since
2489 	 * it's been around since HSW and it's a masked register so the upper
2490 	 * 16 bits can never read back as 1's if device access is operating
2491 	 * properly.
2492 	 *
2493 	 * If MMIO isn't working, we'll wait up to 2 seconds to see if it
2494 	 * recovers, then give up.
2495 	 */
2496 #define COND (__raw_uncore_read32(uncore, FORCEWAKE_MT) != ~0)
2497 	if (wait_for(COND, 2000) == -ETIMEDOUT) {
2498 		drm_err(&i915->drm, "Device is non-operational; MMIO access returns 0xFFFFFFFF!\n");
2499 		return -EIO;
2500 	}
2501 
2502 	return 0;
2503 }
2504 
2505 int intel_uncore_init_mmio(struct intel_uncore *uncore)
2506 {
2507 	struct drm_i915_private *i915 = uncore->i915;
2508 	struct intel_display *display = i915->display;
2509 	int ret;
2510 
2511 	ret = sanity_check_mmio_access(uncore);
2512 	if (ret)
2513 		return ret;
2514 
2515 	/*
2516 	 * The boot firmware initializes local memory and assesses its health.
2517 	 * If memory training fails, the punit will have been instructed to
2518 	 * keep the GT powered down; we won't be able to communicate with it
2519 	 * and we should not continue with driver initialization.
2520 	 */
2521 	if (IS_DGFX(i915) &&
2522 	    !(__raw_uncore_read32(uncore, GU_CNTL) & LMEM_INIT)) {
2523 		drm_err(&i915->drm, "LMEM not initialized by firmware\n");
2524 		return -ENODEV;
2525 	}
2526 
2527 	if (GRAPHICS_VER(i915) > 5 && !intel_vgpu_active(i915))
2528 		uncore->flags |= UNCORE_HAS_FORCEWAKE;
2529 
2530 	if (!intel_uncore_has_forcewake(uncore)) {
2531 		uncore_raw_init(uncore);
2532 	} else {
2533 		ret = uncore_forcewake_init(uncore);
2534 		if (ret)
2535 			return ret;
2536 	}
2537 
2538 	/* make sure fw funcs are set if and only if we have fw */
2539 	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->fw_get_funcs);
2540 	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
2541 	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);
2542 
2543 	if (HAS_FPGA_DBG_UNCLAIMED(display))
2544 		uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;
2545 
2546 	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
2547 		uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;
2548 
2549 	if (IS_GRAPHICS_VER(i915, 6, 7))
2550 		uncore->flags |= UNCORE_HAS_FIFO;
2551 
2552 	/* clear out unclaimed reg detection bit */
2553 	if (intel_uncore_unclaimed_mmio(uncore))
2554 		drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");
2555 
2556 	return 0;
2557 }
2558 
2559 /*
2560  * We might have detected that some engines are fused off after we initialized
2561  * the forcewake domains. Prune them, to make sure they only reference existing
2562  * engines.
2563  */
2564 void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
2565 					  struct intel_gt *gt)
2566 {
2567 	enum forcewake_domains fw_domains = uncore->fw_domains;
2568 	enum forcewake_domain_id domain_id;
2569 	int i;
2570 
2571 	if (!intel_uncore_has_forcewake(uncore) || GRAPHICS_VER(uncore->i915) < 11)
2572 		return;
2573 
2574 	for (i = 0; i < I915_MAX_VCS; i++) {
2575 		domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
2576 
2577 		if (HAS_ENGINE(gt, _VCS(i)))
2578 			continue;
2579 
2580 		/*
2581 		 * Starting with XeHP, the power well for an even-numbered
2582 		 * VDBOX is also used for shared units within the
2583 		 * media slice such as SFC.  So even if the engine
2584 		 * itself is fused off, we still need to initialize
2585 		 * the forcewake domain if any of the other engines
2586 		 * in the same media slice are present.
2587 		 */
2588 		if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 55) && i % 2 == 0) {
2589 			if ((i + 1 < I915_MAX_VCS) && HAS_ENGINE(gt, _VCS(i + 1)))
2590 				continue;
2591 
2592 			if (HAS_ENGINE(gt, _VECS(i / 2)))
2593 				continue;
2594 		}
2595 
2596 		if (fw_domains & BIT(domain_id))
2597 			fw_domain_fini(uncore, domain_id);
2598 	}
2599 
2600 	for (i = 0; i < I915_MAX_VECS; i++) {
2601 		domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
2602 
2603 		if (HAS_ENGINE(gt, _VECS(i)))
2604 			continue;
2605 
2606 		if (fw_domains & BIT(domain_id))
2607 			fw_domain_fini(uncore, domain_id);
2608 	}
2609 
2610 	if ((fw_domains & BIT(FW_DOMAIN_ID_GSC)) && !HAS_ENGINE(gt, GSC0))
2611 		fw_domain_fini(uncore, FW_DOMAIN_ID_GSC);
2612 }
2613 
2614 /*
2615  * The driver-initiated FLR is the highest level of reset that we can trigger
2616  * from within the driver. It is different from the PCI FLR in that it doesn't
2617  * fully reset the SGUnit and doesn't modify the PCI config space and therefore
2618  * it doesn't require a re-enumeration of the PCI BARs. However, the
2619  * driver-initiated FLR does still cause a reset of both GT and display and a
2620  * memory wipe of local and stolen memory, so recovery would require a full HW
2621  * re-init and saving/restoring (or re-populating) the wiped memory. Since we
2622  * perform the FLR as the very last action before releasing access to the HW
2623  * during the driver release flow, we don't attempt recovery at all, because
2624  * if/when a new instance of i915 is bound to the device it will do a full
2625  * re-init anyway.
2626  */
2627 static void driver_initiated_flr(struct intel_uncore *uncore)
2628 {
2629 	struct drm_i915_private *i915 = uncore->i915;
2630 	unsigned int flr_timeout_ms;
2631 	int ret;
2632 
2633 	drm_dbg(&i915->drm, "Triggering Driver-FLR\n");
2634 
2635 	/*
2636 	 * The specification recommends a 3 second FLR reset timeout. To be
2637 	 * cautious, we will extend this to 9 seconds, three times the specified
2638 	 * timeout.
2639 	 */
2640 	flr_timeout_ms = 9000;
2641 
2642 	/*
2643 	 * Make sure any pending FLR requests have cleared by waiting for the
2644 	 * FLR trigger bit to go to zero. Also clear GU_DEBUG's DRIVERFLR_STATUS
2645 	 * to make sure it's not still set from a prior attempt (it's a write to
2646 	 * clear bit).
2647 	 * Note that we should never be in a situation where a previous attempt
2648 	 * is still pending (unless the HW is totally dead), but better to be
2649 	 * safe in case something unexpected happens.
2650 	 */
2651 	ret = intel_wait_for_register_fw(uncore, GU_CNTL, DRIVERFLR, 0, flr_timeout_ms, NULL);
2652 	if (ret) {
2653 		drm_err(&i915->drm,
2654 			"Failed to wait for Driver-FLR bit to clear! %d\n",
2655 			ret);
2656 		return;
2657 	}
2658 	intel_uncore_write_fw(uncore, GU_DEBUG, DRIVERFLR_STATUS);
2659 
2660 	/* Trigger the actual Driver-FLR */
2661 	intel_uncore_rmw_fw(uncore, GU_CNTL, 0, DRIVERFLR);
2662 
2663 	/* Wait for hardware teardown to complete */
2664 	ret = intel_wait_for_register_fw(uncore, GU_CNTL,
2665 					 DRIVERFLR, 0,
2666 					 flr_timeout_ms, NULL);
2667 	if (ret) {
2668 		drm_err(&i915->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
2669 		return;
2670 	}
2671 
2672 	/* Wait for hardware/firmware re-init to complete */
2673 	ret = intel_wait_for_register_fw(uncore, GU_DEBUG,
2674 					 DRIVERFLR_STATUS, DRIVERFLR_STATUS,
2675 					 flr_timeout_ms, NULL);
2676 	if (ret) {
2677 		drm_err(&i915->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
2678 		return;
2679 	}
2680 
2681 	/* Clear sticky completion status */
2682 	intel_uncore_write_fw(uncore, GU_DEBUG, DRIVERFLR_STATUS);
2683 }
2684 
2685 /* Called via drm-managed action */
2686 void intel_uncore_fini_mmio(struct drm_device *dev, void *data)
2687 {
2688 	struct intel_uncore *uncore = data;
2689 
2690 	if (intel_uncore_has_forcewake(uncore)) {
2691 		iosf_mbi_punit_acquire();
2692 		iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
2693 			&uncore->pmic_bus_access_nb);
2694 		intel_uncore_forcewake_reset(uncore);
2695 		intel_uncore_fw_domains_fini(uncore);
2696 		iosf_mbi_punit_release();
2697 	}
2698 
2699 	if (intel_uncore_needs_flr_on_fini(uncore))
2700 		driver_initiated_flr(uncore);
2701 }
2702 
2703 /**
2704  * __intel_wait_for_register_fw - wait until register matches expected state
2705  * @uncore: the struct intel_uncore
2706  * @reg: the register to read
2707  * @mask: mask to apply to register value
2708  * @value: expected value
2709  * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
2710  * @slow_timeout_ms: slow timeout in millisecond
2711  * @out_value: optional placeholder to hold register value
2712  *
2713  * This routine waits until the target register @reg contains the expected
2714  * @value after applying the @mask, i.e. it waits until ::
2715  *
2716  *     (intel_uncore_read_fw(uncore, reg) & mask) == value
2717  *
2718  * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds.
2719  * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
2720  * For atomic context, @slow_timeout_ms must be zero and @fast_timeout_us
2721  * must not be larger than 20,000 microseconds.
2722  * Note that this routine assumes the caller holds forcewake asserted, it is
2723  * not suitable for very long waits. See intel_wait_for_register() if you
2724  * wish to wait without holding forcewake for the duration (i.e. you expect
2725  * the wait to be slow).
2726  *
2727  * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
2728  */
2729 int __intel_wait_for_register_fw(struct intel_uncore *uncore,
2730 				 i915_reg_t reg,
2731 				 u32 mask,
2732 				 u32 value,
2733 				 unsigned int fast_timeout_us,
2734 				 unsigned int slow_timeout_ms,
2735 				 u32 *out_value)
2736 {
2737 	u32 reg_value = 0;
2738 #define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
2739 	int ret;
2740 
2741 	/* Catch any overuse of this function */
2742 	might_sleep_if(slow_timeout_ms);
2743 	GEM_BUG_ON(fast_timeout_us > 20000);
2744 	GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);
2745 
2746 	ret = -ETIMEDOUT;
2747 	if (fast_timeout_us && fast_timeout_us <= 20000)
2748 		ret = _wait_for_atomic(done, fast_timeout_us, 0);
2749 	if (ret && slow_timeout_ms)
2750 		ret = wait_for(done, slow_timeout_ms);
2751 
2752 	if (out_value)
2753 		*out_value = reg_value;
2754 
2755 	return ret;
2756 #undef done
2757 }
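/*
 * Illustrative usage sketch (editorial addition, not upstream code): a caller
 * that already holds the needed forcewake domain can poll in atomic context
 * by passing only a fast timeout, e.g. waiting for a hypothetical busy bit
 * to clear:
 *
 *	err = __intel_wait_for_register_fw(uncore, some_reg, some_busy_bit, 0,
 *					   500, 0, NULL);
 *	if (err == -ETIMEDOUT)
 *		drm_err(&uncore->i915->drm, "register never became idle\n");
 *
 * "some_reg" and "some_busy_bit" are placeholders, not real definitions.
 */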
2758 
2759 /**
2760  * __intel_wait_for_register - wait until register matches expected state
2761  * @uncore: the struct intel_uncore
2762  * @reg: the register to read
2763  * @mask: mask to apply to register value
2764  * @value: expected value
2765  * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
2766  * @slow_timeout_ms: slow timeout in millisecond
2767  * @out_value: optional placeholder to hold register value
2768  *
2769  * This routine waits until the target register @reg contains the expected
2770  * @value after applying the @mask, i.e. it waits until ::
2771  *
2772  *     (intel_uncore_read(uncore, reg) & mask) == value
2773  *
2774  * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
2775  *
2776  * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
2777  */
2778 int __intel_wait_for_register(struct intel_uncore *uncore,
2779 			      i915_reg_t reg,
2780 			      u32 mask,
2781 			      u32 value,
2782 			      unsigned int fast_timeout_us,
2783 			      unsigned int slow_timeout_ms,
2784 			      u32 *out_value)
2785 {
2786 	unsigned fw =
2787 		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
2788 	u32 reg_value;
2789 	int ret;
2790 
2791 	might_sleep_if(slow_timeout_ms);
2792 
2793 	spin_lock_irq(&uncore->lock);
2794 	intel_uncore_forcewake_get__locked(uncore, fw);
2795 
2796 	ret = __intel_wait_for_register_fw(uncore,
2797 					   reg, mask, value,
2798 					   fast_timeout_us, 0, &reg_value);
2799 
2800 	intel_uncore_forcewake_put__locked(uncore, fw);
2801 	spin_unlock_irq(&uncore->lock);
2802 
2803 	if (ret && slow_timeout_ms)
2804 		ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
2805 								       reg),
2806 				 (reg_value & mask) == value,
2807 				 slow_timeout_ms * 1000, 10, 1000);
2808 
2809 	/* just trace the final value */
2810 	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
2811 
2812 	if (out_value)
2813 		*out_value = reg_value;
2814 
2815 	return ret;
2816 }
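/*
 * Illustrative usage sketch (editorial addition, not upstream code): unlike
 * the _fw variant above, this helper takes the required forcewake around the
 * atomic fast poll and then, if needed, sleep-waits via the normal
 * auto-forcewake read path, so the caller does not hold forcewake itself:
 *
 *	err = __intel_wait_for_register(uncore, some_reg, some_mask,
 *					some_value, 500, 10, NULL);
 *
 * spins for up to 500us and then sleep-waits for up to 10ms. "some_reg",
 * "some_mask" and "some_value" are placeholders, not real definitions.
 */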
2817 
2818 bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
2819 {
2820 	bool ret;
2821 
2822 	if (!uncore->debug)
2823 		return false;
2824 
2825 	spin_lock_irq(&uncore->debug->lock);
2826 	ret = check_for_unclaimed_mmio(uncore);
2827 	spin_unlock_irq(&uncore->debug->lock);
2828 
2829 	return ret;
2830 }
2831 
2832 bool
2833 intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
2834 {
2835 	bool ret = false;
2836 
2837 	if (drm_WARN_ON(&uncore->i915->drm, !uncore->debug))
2838 		return false;
2839 
2840 	spin_lock_irq(&uncore->debug->lock);
2841 
2842 	if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
2843 		goto out;
2844 
2845 	if (unlikely(check_for_unclaimed_mmio(uncore))) {
2846 		if (!uncore->i915->params.mmio_debug) {
2847 			drm_dbg(&uncore->i915->drm,
2848 				"Unclaimed register detected, "
2849 				"enabling oneshot unclaimed register reporting. "
2850 				"Please use i915.mmio_debug=N for more information.\n");
2851 			uncore->i915->params.mmio_debug++;
2852 		}
2853 		uncore->debug->unclaimed_mmio_check--;
2854 		ret = true;
2855 	}
2856 
2857 out:
2858 	spin_unlock_irq(&uncore->debug->lock);
2859 
2860 	return ret;
2861 }
2862 
2863 /**
2864  * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
2865  * 				    a register
2866  * @uncore: pointer to struct intel_uncore
2867  * @reg: register in question
2868  * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
2869  *
2870  * Returns a set of forcewake domains required to be taken with, for example,
2871  * intel_uncore_forcewake_get() for the specified register to be accessible in the
2872  * specified mode (read, write or read/write) with raw mmio accessors.
2873  *
2874  * NOTE: On Gen6 and Gen7 write forcewake domain (FORCEWAKE_RENDER) requires the
2875  * callers to do FIFO management on their own or risk losing writes.
2876  */
2877 enum forcewake_domains
2878 intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
2879 			       i915_reg_t reg, unsigned int op)
2880 {
2881 	enum forcewake_domains fw_domains = 0;
2882 
2883 	drm_WARN_ON(&uncore->i915->drm, !op);
2884 
2885 	if (!intel_uncore_has_forcewake(uncore))
2886 		return 0;
2887 
2888 	if (op & FW_REG_READ)
2889 		fw_domains = uncore->funcs.read_fw_domains(uncore, reg);
2890 
2891 	if (op & FW_REG_WRITE)
2892 		fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);
2893 
2894 	drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains);
2895 
2896 	return fw_domains;
2897 }
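/*
 * Illustrative usage sketch (editorial addition, not upstream code): query
 * the domains once, then bracket a batch of raw accesses with an explicit
 * get/put so the per-access automatic forcewake handling is avoided:
 *
 *	fw = intel_uncore_forcewake_for_reg(uncore, some_reg,
 *					    FW_REG_READ | FW_REG_WRITE);
 *	intel_uncore_forcewake_get(uncore, fw);
 *	intel_uncore_write_fw(uncore, some_reg,
 *			      intel_uncore_read_fw(uncore, some_reg) | some_bit);
 *	intel_uncore_forcewake_put(uncore, fw);
 *
 * "some_reg" and "some_bit" are placeholders for a real register and mask.
 */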
2898 
2899 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2900 #include "selftests/mock_uncore.c"
2901 #include "selftests/intel_uncore.c"
2902 #endif
2903