xref: /linux/drivers/gpu/drm/i915/intel_uncore.c (revision f6e8dc9edf963dbc99085e54f6ced6da9daa6100)
1 /*
2  * Copyright © 2013 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include <linux/pm_runtime.h>
25 
26 #include <drm/drm_managed.h>
27 #include <drm/drm_print.h>
28 
29 #include "display/intel_display_core.h"
30 #include "gt/intel_engine_regs.h"
31 #include "gt/intel_gt.h"
32 #include "gt/intel_gt_regs.h"
33 
34 #include "i915_drv.h"
35 #include "i915_iosf_mbi.h"
36 #include "i915_reg.h"
37 #include "i915_vgpu.h"
38 #include "i915_wait_util.h"
39 #include "intel_uncore_trace.h"
40 
41 #define FORCEWAKE_ACK_TIMEOUT_MS 50
42 #define GT_FIFO_TIMEOUT_MS	 10
43 
44 struct intel_uncore *to_intel_uncore(struct drm_device *drm)
45 {
46 	return &to_i915(drm)->uncore;
47 }
48 
49 #define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
50 
51 static void
52 fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
53 {
54 	uncore->fw_get_funcs->force_wake_get(uncore, fw_domains);
55 }
56 
57 void
58 intel_uncore_mmio_debug_init_early(struct drm_i915_private *i915)
59 {
60 	spin_lock_init(&i915->mmio_debug.lock);
61 	i915->mmio_debug.unclaimed_mmio_check = 1;
62 
63 	i915->uncore.debug = &i915->mmio_debug;
64 }
65 
66 static void mmio_debug_suspend(struct intel_uncore *uncore)
67 {
68 	if (!uncore->debug)
69 		return;
70 
71 	spin_lock(&uncore->debug->lock);
72 
73 	/* Save and disable mmio debugging for the user bypass */
74 	if (!uncore->debug->suspend_count++) {
75 		uncore->debug->saved_mmio_check = uncore->debug->unclaimed_mmio_check;
76 		uncore->debug->unclaimed_mmio_check = 0;
77 	}
78 
79 	spin_unlock(&uncore->debug->lock);
80 }
81 
82 static bool check_for_unclaimed_mmio(struct intel_uncore *uncore);
83 
84 static void mmio_debug_resume(struct intel_uncore *uncore)
85 {
86 	if (!uncore->debug)
87 		return;
88 
89 	spin_lock(&uncore->debug->lock);
90 
91 	if (!--uncore->debug->suspend_count)
92 		uncore->debug->unclaimed_mmio_check = uncore->debug->saved_mmio_check;
93 
94 	if (check_for_unclaimed_mmio(uncore))
95 		drm_info(&uncore->i915->drm,
96 			 "Invalid mmio detected during user access\n");
97 
98 	spin_unlock(&uncore->debug->lock);
99 }
100 
101 static const char * const forcewake_domain_names[] = {
102 	"render",
103 	"gt",
104 	"media",
105 	"vdbox0",
106 	"vdbox1",
107 	"vdbox2",
108 	"vdbox3",
109 	"vdbox4",
110 	"vdbox5",
111 	"vdbox6",
112 	"vdbox7",
113 	"vebox0",
114 	"vebox1",
115 	"vebox2",
116 	"vebox3",
117 	"gsc",
118 };
119 
120 const char *
121 intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
122 {
123 	BUILD_BUG_ON(ARRAY_SIZE(forcewake_domain_names) != FW_DOMAIN_ID_COUNT);
124 
125 	if (id >= 0 && id < FW_DOMAIN_ID_COUNT)
126 		return forcewake_domain_names[id];
127 
128 	WARN_ON(id);
129 
130 	return "unknown";
131 }
132 
133 #define fw_ack(d) readl((d)->reg_ack)
134 #define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
135 #define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)
136 
137 static inline void
138 fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
139 {
140 	/*
141 	 * We don't really know if the powerwell for the forcewake domain we are
142 	 * trying to reset here does exist at this point (engines could be fused
143 	 * off in ICL+), so no waiting for acks
144 	 */
145 	/* WaRsClearFWBitsAtReset */
146 	if (GRAPHICS_VER(d->uncore->i915) >= 12)
147 		fw_clear(d, 0xefff);
148 	else
149 		fw_clear(d, 0xffff);
150 }
151 
152 static inline void
153 fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
154 {
155 	GEM_BUG_ON(d->uncore->fw_domains_timer & d->mask);
156 	d->uncore->fw_domains_timer |= d->mask;
157 	d->wake_count++;
158 	hrtimer_start_range_ns(&d->timer,
159 			       NSEC_PER_MSEC,
160 			       NSEC_PER_MSEC,
161 			       HRTIMER_MODE_REL);
162 }
163 
164 static inline int
165 __wait_for_ack(const struct intel_uncore_forcewake_domain *d,
166 	       const u32 ack,
167 	       const u32 value)
168 {
169 	return wait_for_atomic((fw_ack(d) & ack) == value,
170 			       FORCEWAKE_ACK_TIMEOUT_MS);
171 }
172 
173 static inline int
174 wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
175 	       const u32 ack)
176 {
177 	return __wait_for_ack(d, ack, 0);
178 }
179 
180 static inline int
181 wait_ack_set(const struct intel_uncore_forcewake_domain *d,
182 	     const u32 ack)
183 {
184 	return __wait_for_ack(d, ack, ack);
185 }
186 
187 static inline void
188 fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
189 {
190 	if (!wait_ack_clear(d, FORCEWAKE_KERNEL))
191 		return;
192 
193 	if (fw_ack(d) == ~0) {
194 		drm_err(&d->uncore->i915->drm,
195 			"%s: MMIO unreliable (forcewake register returns 0xFFFFFFFF)!\n",
196 			intel_uncore_forcewake_domain_to_str(d->id));
197 		intel_gt_set_wedged_async(d->uncore->gt);
198 	} else {
199 		drm_err(&d->uncore->i915->drm,
200 			"%s: timed out waiting for forcewake ack to clear.\n",
201 			intel_uncore_forcewake_domain_to_str(d->id));
202 	}
203 
204 	add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
205 }
206 
207 enum ack_type {
208 	ACK_CLEAR = 0,
209 	ACK_SET
210 };
211 
212 static int
213 fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
214 				 const enum ack_type type)
215 {
216 	const u32 ack_bit = FORCEWAKE_KERNEL;
217 	const u32 value = type == ACK_SET ? ack_bit : 0;
218 	unsigned int pass;
219 	bool ack_detected;
220 
221 	/*
222 	 * There is a possibility of the driver's wake request colliding
223 	 * with the hardware's own wake requests, which can cause the
224 	 * hardware to not deliver the driver's ack message.
225 	 *
226 	 * Use a fallback bit toggle to kick the gpu state machine
227 	 * in the hope that the original ack will be delivered along with
228 	 * the fallback ack.
229 	 *
230 	 * This workaround is described in HSDES #1604254524 and it's known as:
231 	 * WaRsForcewakeAddDelayForAck:skl,bxt,kbl,glk,cfl,cnl,icl
232 	 * although the name is a bit misleading.
233 	 */
234 
235 	pass = 1;
236 	do {
237 		wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);
238 
239 		fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
240 		/* Give gt some time to relax before the polling frenzy */
241 		udelay(10 * pass);
242 		wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);
243 
244 		ack_detected = (fw_ack(d) & ack_bit) == value;
245 
246 		fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
247 	} while (!ack_detected && pass++ < 10);
248 
249 	drm_dbg(&d->uncore->i915->drm,
250 		"%s had to use fallback to %s ack, 0x%x (passes %u)\n",
251 		intel_uncore_forcewake_domain_to_str(d->id),
252 		type == ACK_SET ? "set" : "clear",
253 		fw_ack(d),
254 		pass);
255 
256 	return ack_detected ? 0 : -ETIMEDOUT;
257 }
258 
259 static inline void
260 fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
261 {
262 	if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
263 		return;
264 
265 	if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
266 		fw_domain_wait_ack_clear(d);
267 }
268 
269 static inline void
270 fw_domain_get(const struct intel_uncore_forcewake_domain *d)
271 {
272 	fw_set(d, FORCEWAKE_KERNEL);
273 }
274 
275 static inline void
276 fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
277 {
278 	if (wait_ack_set(d, FORCEWAKE_KERNEL)) {
279 		drm_err(&d->uncore->i915->drm,
280 			"%s: timed out waiting for forcewake ack request.\n",
281 			intel_uncore_forcewake_domain_to_str(d->id));
282 		add_taint_for_CI(d->uncore->i915, TAINT_WARN); /* CI now unreliable */
283 	}
284 }
285 
286 static inline void
287 fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
288 {
289 	if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
290 		return;
291 
292 	if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
293 		fw_domain_wait_ack_set(d);
294 }
295 
296 static inline void
297 fw_domain_put(const struct intel_uncore_forcewake_domain *d)
298 {
299 	fw_clear(d, FORCEWAKE_KERNEL);
300 }
301 
302 static void
303 fw_domains_get_normal(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
304 {
305 	struct intel_uncore_forcewake_domain *d;
306 	unsigned int tmp;
307 
308 	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
309 
310 	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
311 		fw_domain_wait_ack_clear(d);
312 		fw_domain_get(d);
313 	}
314 
315 	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
316 		fw_domain_wait_ack_set(d);
317 
318 	uncore->fw_domains_active |= fw_domains;
319 }
320 
321 static void
322 fw_domains_get_with_fallback(struct intel_uncore *uncore,
323 			     enum forcewake_domains fw_domains)
324 {
325 	struct intel_uncore_forcewake_domain *d;
326 	unsigned int tmp;
327 
328 	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
329 
330 	for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
331 		fw_domain_wait_ack_clear_fallback(d);
332 		fw_domain_get(d);
333 	}
334 
335 	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
336 		fw_domain_wait_ack_set_fallback(d);
337 
338 	uncore->fw_domains_active |= fw_domains;
339 }
340 
341 static void
342 fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
343 {
344 	struct intel_uncore_forcewake_domain *d;
345 	unsigned int tmp;
346 
347 	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
348 
349 	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
350 		fw_domain_put(d);
351 
352 	uncore->fw_domains_active &= ~fw_domains;
353 }
354 
355 static void
356 fw_domains_reset(struct intel_uncore *uncore,
357 		 enum forcewake_domains fw_domains)
358 {
359 	struct intel_uncore_forcewake_domain *d;
360 	unsigned int tmp;
361 
362 	if (!fw_domains)
363 		return;
364 
365 	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
366 
367 	for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
368 		fw_domain_reset(d);
369 }
370 
371 static inline u32 gt_thread_status(struct intel_uncore *uncore)
372 {
373 	u32 val;
374 
375 	val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
376 	val &= GEN6_GT_THREAD_STATUS_CORE_MASK;
377 
378 	return val;
379 }
380 
381 static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
382 {
383 	/*
384 	 * w/a for a sporadic read returning 0 by waiting for the GT
385 	 * thread to wake up.
386 	 */
387 	drm_WARN_ONCE(&uncore->i915->drm,
388 		      wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
389 		      "GT thread status wait timed out\n");
390 }
391 
392 static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
393 					      enum forcewake_domains fw_domains)
394 {
395 	fw_domains_get_normal(uncore, fw_domains);
396 
397 	/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
398 	__gen6_gt_wait_for_thread_c0(uncore);
399 }
400 
401 static inline u32 fifo_free_entries(struct intel_uncore *uncore)
402 {
403 	u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);
404 
405 	return count & GT_FIFO_FREE_ENTRIES_MASK;
406 }
407 
408 static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
409 {
410 	u32 n;
411 
412 	/* On VLV, FIFO will be shared by both SW and HW.
413 	 * So, we need to read the FREE_ENTRIES every time */
414 	if (IS_VALLEYVIEW(uncore->i915))
415 		n = fifo_free_entries(uncore);
416 	else
417 		n = uncore->fifo_count;
418 
419 	if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
420 		if (wait_for_atomic((n = fifo_free_entries(uncore)) >
421 				    GT_FIFO_NUM_RESERVED_ENTRIES,
422 				    GT_FIFO_TIMEOUT_MS)) {
423 			drm_dbg(&uncore->i915->drm,
424 				"GT_FIFO timeout, entries: %u\n", n);
425 			return;
426 		}
427 	}
428 
429 	uncore->fifo_count = n - 1;
430 }
431 
432 static enum hrtimer_restart
433 intel_uncore_fw_release_timer(struct hrtimer *timer)
434 {
435 	struct intel_uncore_forcewake_domain *domain =
436 	       container_of(timer, struct intel_uncore_forcewake_domain, timer);
437 	struct intel_uncore *uncore = domain->uncore;
438 	unsigned long irqflags;
439 
440 	assert_rpm_device_not_suspended(uncore->rpm);
441 
442 	if (xchg(&domain->active, false))
443 		return HRTIMER_RESTART;
444 
445 	spin_lock_irqsave(&uncore->lock, irqflags);
446 
447 	uncore->fw_domains_timer &= ~domain->mask;
448 
449 	GEM_BUG_ON(!domain->wake_count);
450 	if (--domain->wake_count == 0)
451 		fw_domains_put(uncore, domain->mask);
452 
453 	spin_unlock_irqrestore(&uncore->lock, irqflags);
454 
455 	return HRTIMER_NORESTART;
456 }
457 
458 /* Note: callers must have acquired the PUNIT->PMIC bus before calling this. */
459 static unsigned int
460 intel_uncore_forcewake_reset(struct intel_uncore *uncore)
461 {
462 	unsigned long irqflags;
463 	struct intel_uncore_forcewake_domain *domain;
464 	int retry_count = 100;
465 	enum forcewake_domains fw, active_domains;
466 
467 	iosf_mbi_assert_punit_acquired();
468 
469 	/* Hold uncore.lock across reset to prevent any register access
470 	 * with forcewake not set correctly. Wait until all pending
471 	 * timers are run before holding.
472 	 */
473 	while (1) {
474 		unsigned int tmp;
475 
476 		active_domains = 0;
477 
478 		for_each_fw_domain(domain, uncore, tmp) {
479 			smp_store_mb(domain->active, false);
480 			if (hrtimer_cancel(&domain->timer) == 0)
481 				continue;
482 
483 			intel_uncore_fw_release_timer(&domain->timer);
484 		}
485 
486 		spin_lock_irqsave(&uncore->lock, irqflags);
487 
488 		for_each_fw_domain(domain, uncore, tmp) {
489 			if (hrtimer_active(&domain->timer))
490 				active_domains |= domain->mask;
491 		}
492 
493 		if (active_domains == 0)
494 			break;
495 
496 		if (--retry_count == 0) {
497 			drm_err(&uncore->i915->drm, "Timed out waiting for forcewake timers to finish\n");
498 			break;
499 		}
500 
501 		spin_unlock_irqrestore(&uncore->lock, irqflags);
502 		cond_resched();
503 	}
504 
505 	drm_WARN_ON(&uncore->i915->drm, active_domains);
506 
507 	fw = uncore->fw_domains_active;
508 	if (fw)
509 		fw_domains_put(uncore, fw);
510 
511 	fw_domains_reset(uncore, uncore->fw_domains);
512 	assert_forcewakes_inactive(uncore);
513 
514 	spin_unlock_irqrestore(&uncore->lock, irqflags);
515 
516 	return fw; /* track the lost user forcewake domains */
517 }
518 
519 static bool
520 fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
521 {
522 	u32 dbg;
523 
524 	dbg = __raw_uncore_read32(uncore, FPGA_DBG);
525 	if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
526 		return false;
527 
528 	/*
529 	 * Bugs in PCI programming (or failing hardware) can occasionally cause
530 	 * us to lose access to the MMIO BAR.  When this happens, register
531 	 * reads will come back with 0xFFFFFFFF for every register and things
532 	 * go bad very quickly.  Let's try to detect that special case and at
533 	 * least try to print a more informative message about what has
534 	 * happened.
535 	 *
536 	 * During normal operation the FPGA_DBG register has several unused
537 	 * bits that will always read back as 0's so we can use them as canaries
538 	 * to recognize when MMIO accesses are just busted.
539 	 */
540 	if (unlikely(dbg == ~0))
541 		drm_err(&uncore->i915->drm,
542 			"Lost access to MMIO BAR; all registers now read back as 0xFFFFFFFF!\n");
543 
544 	__raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
545 
546 	return true;
547 }
548 
549 static bool
550 vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
551 {
552 	u32 cer;
553 
554 	cer = __raw_uncore_read32(uncore, CLAIM_ER);
555 	if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
556 		return false;
557 
558 	__raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);
559 
560 	return true;
561 }
562 
563 static bool
564 gen6_check_for_fifo_debug(struct intel_uncore *uncore)
565 {
566 	u32 fifodbg;
567 
568 	fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);
569 
570 	if (unlikely(fifodbg)) {
571 		drm_dbg(&uncore->i915->drm, "GTFIFODBG = 0x%08x\n", fifodbg);
572 		__raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
573 	}
574 
575 	return fifodbg;
576 }
577 
578 static bool
579 check_for_unclaimed_mmio(struct intel_uncore *uncore)
580 {
581 	bool ret = false;
582 
583 	lockdep_assert_held(&uncore->debug->lock);
584 
585 	if (uncore->debug->suspend_count)
586 		return false;
587 
588 	if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
589 		ret |= fpga_check_for_unclaimed_mmio(uncore);
590 
591 	if (intel_uncore_has_dbg_unclaimed(uncore))
592 		ret |= vlv_check_for_unclaimed_mmio(uncore);
593 
594 	if (intel_uncore_has_fifo(uncore))
595 		ret |= gen6_check_for_fifo_debug(uncore);
596 
597 	return ret;
598 }
599 
600 static void forcewake_early_sanitize(struct intel_uncore *uncore,
601 				     unsigned int restore_forcewake)
602 {
603 	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
604 
605 	/* WaDisableShadowRegForCpd:chv */
606 	if (IS_CHERRYVIEW(uncore->i915)) {
607 		__raw_uncore_write32(uncore, GTFIFOCTL,
608 				     __raw_uncore_read32(uncore, GTFIFOCTL) |
609 				     GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
610 				     GT_FIFO_CTL_RC6_POLICY_STALL);
611 	}
612 
613 	iosf_mbi_punit_acquire();
614 	intel_uncore_forcewake_reset(uncore);
615 	if (restore_forcewake) {
616 		spin_lock_irq(&uncore->lock);
617 		fw_domains_get(uncore, restore_forcewake);
618 
619 		if (intel_uncore_has_fifo(uncore))
620 			uncore->fifo_count = fifo_free_entries(uncore);
621 		spin_unlock_irq(&uncore->lock);
622 	}
623 	iosf_mbi_punit_release();
624 }
625 
626 void intel_uncore_suspend(struct intel_uncore *uncore)
627 {
628 	if (!intel_uncore_has_forcewake(uncore))
629 		return;
630 
631 	iosf_mbi_punit_acquire();
632 	iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
633 		&uncore->pmic_bus_access_nb);
634 	uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
635 	iosf_mbi_punit_release();
636 }
637 
638 void intel_uncore_resume_early(struct intel_uncore *uncore)
639 {
640 	unsigned int restore_forcewake;
641 
642 	if (intel_uncore_unclaimed_mmio(uncore))
643 		drm_dbg(&uncore->i915->drm, "unclaimed mmio detected on resume, clearing\n");
644 
645 	if (!intel_uncore_has_forcewake(uncore))
646 		return;
647 
648 	restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
649 	forcewake_early_sanitize(uncore, restore_forcewake);
650 
651 	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
652 }
653 
654 void intel_uncore_runtime_resume(struct intel_uncore *uncore)
655 {
656 	if (!intel_uncore_has_forcewake(uncore))
657 		return;
658 
659 	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
660 }
661 
662 static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
663 					 enum forcewake_domains fw_domains)
664 {
665 	struct intel_uncore_forcewake_domain *domain;
666 	unsigned int tmp;
667 
668 	fw_domains &= uncore->fw_domains;
669 
670 	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
671 		if (domain->wake_count++) {
672 			fw_domains &= ~domain->mask;
673 			domain->active = true;
674 		}
675 	}
676 
677 	if (fw_domains)
678 		fw_domains_get(uncore, fw_domains);
679 }
680 
681 /**
682  * intel_uncore_forcewake_get - grab forcewake domain references
683  * @uncore: the intel_uncore structure
684  * @fw_domains: forcewake domains to get reference on
685  *
686  * This function can be used to get GT's forcewake domain references.
687  * Normal register access will handle the forcewake domains automatically.
688  * However, if some sequence requires the GT to not power down particular
689  * forcewake domains, this function should be called at the beginning of the
690  * sequence, and the references should subsequently be dropped by a symmetric
691  * call to intel_uncore_forcewake_put(). Usually the caller wants all domains
692  * to be kept awake, so @fw_domains would then be FORCEWAKE_ALL.
693  */
694 void intel_uncore_forcewake_get(struct intel_uncore *uncore,
695 				enum forcewake_domains fw_domains)
696 {
697 	unsigned long irqflags;
698 
699 	if (!uncore->fw_get_funcs)
700 		return;
701 
702 	assert_rpm_wakelock_held(uncore->rpm);
703 
704 	spin_lock_irqsave(&uncore->lock, irqflags);
705 	__intel_uncore_forcewake_get(uncore, fw_domains);
706 	spin_unlock_irqrestore(&uncore->lock, irqflags);
707 }
708 
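/*
 * Illustrative usage sketch (not part of this file): a caller that must keep
 * the render power well up across a multi-register sequence pairs the get/put
 * around the accesses.  Only intel_uncore_forcewake_get()/_put() and the
 * intel_uncore_read()/intel_uncore_write() accessors are assumed from the
 * driver; the helper name and the EXAMPLE_REG_* registers are hypothetical.
 *
 *	static void example_render_sequence(struct intel_uncore *uncore)
 *	{
 *		u32 tmp;
 *
 *		intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER);
 *
 *		tmp = intel_uncore_read(uncore, EXAMPLE_REG_A);
 *		intel_uncore_write(uncore, EXAMPLE_REG_B, tmp | BIT(0));
 *
 *		intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER);
 *	}
 */
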
709 /**
710  * intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
711  * @uncore: the intel_uncore structure
712  *
713  * This function is a wrapper around intel_uncore_forcewake_get() to acquire
714  * the GT powerwell and in the process disable our debugging for the
715  * duration of userspace's bypass.
716  */
717 void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
718 {
719 	spin_lock_irq(&uncore->lock);
720 	if (!uncore->user_forcewake_count++) {
721 		intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
722 		mmio_debug_suspend(uncore);
723 	}
724 	spin_unlock_irq(&uncore->lock);
725 }
726 
727 /**
728  * intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
729  * @uncore: the intel_uncore structure
730  *
731  * This function complements intel_uncore_forcewake_user_get() and releases
732  * the GT powerwell taken on behalf of the userspace bypass.
733  */
734 void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
735 {
736 	spin_lock_irq(&uncore->lock);
737 	if (!--uncore->user_forcewake_count) {
738 		mmio_debug_resume(uncore);
739 		intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
740 	}
741 	spin_unlock_irq(&uncore->lock);
742 }
743 
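/*
 * Illustrative sketch (assumed usage, not taken from this file): an interface
 * that hands MMIO access to userspace, e.g. a debugfs file, would bracket its
 * open and release hooks with the user_get/user_put pair so that unclaimed
 * access debugging is disabled exactly for the lifetime of the bypass.  The
 * hook names are hypothetical, and the caller is also expected to hold a
 * runtime PM wakeref across the bypass.
 *
 *	static int example_user_open(struct inode *inode, struct file *file)
 *	{
 *		struct intel_uncore *uncore = inode->i_private;
 *
 *		intel_uncore_forcewake_user_get(uncore);
 *		return 0;
 *	}
 *
 *	static int example_user_release(struct inode *inode, struct file *file)
 *	{
 *		struct intel_uncore *uncore = inode->i_private;
 *
 *		intel_uncore_forcewake_user_put(uncore);
 *		return 0;
 *	}
 */
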
744 /**
745  * intel_uncore_forcewake_get__locked - grab forcewake domain references
746  * @uncore: the intel_uncore structure
747  * @fw_domains: forcewake domains to get reference on
748  *
749  * See intel_uncore_forcewake_get(). This variant places the onus
750  * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
751  */
752 void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
753 					enum forcewake_domains fw_domains)
754 {
755 	lockdep_assert_held(&uncore->lock);
756 
757 	if (!uncore->fw_get_funcs)
758 		return;
759 
760 	__intel_uncore_forcewake_get(uncore, fw_domains);
761 }
762 
763 static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
764 					 enum forcewake_domains fw_domains,
765 					 bool delayed)
766 {
767 	struct intel_uncore_forcewake_domain *domain;
768 	unsigned int tmp;
769 
770 	fw_domains &= uncore->fw_domains;
771 
772 	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
773 		GEM_BUG_ON(!domain->wake_count);
774 
775 		if (--domain->wake_count) {
776 			domain->active = true;
777 			continue;
778 		}
779 
780 		if (delayed &&
781 		    !(domain->uncore->fw_domains_timer & domain->mask))
782 			fw_domain_arm_timer(domain);
783 		else
784 			fw_domains_put(uncore, domain->mask);
785 	}
786 }
787 
788 /**
789  * intel_uncore_forcewake_put - release a forcewake domain reference
790  * @uncore: the intel_uncore structure
791  * @fw_domains: forcewake domains to put references
792  *
793  * This function drops the device-level forcewakes for specified
794  * domains obtained by intel_uncore_forcewake_get().
795  */
796 void intel_uncore_forcewake_put(struct intel_uncore *uncore,
797 				enum forcewake_domains fw_domains)
798 {
799 	unsigned long irqflags;
800 
801 	if (!uncore->fw_get_funcs)
802 		return;
803 
804 	spin_lock_irqsave(&uncore->lock, irqflags);
805 	__intel_uncore_forcewake_put(uncore, fw_domains, false);
806 	spin_unlock_irqrestore(&uncore->lock, irqflags);
807 }
808 
809 void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
810 					enum forcewake_domains fw_domains)
811 {
812 	unsigned long irqflags;
813 
814 	if (!uncore->fw_get_funcs)
815 		return;
816 
817 	spin_lock_irqsave(&uncore->lock, irqflags);
818 	__intel_uncore_forcewake_put(uncore, fw_domains, true);
819 	spin_unlock_irqrestore(&uncore->lock, irqflags);
820 }
821 
822 /**
823  * intel_uncore_forcewake_flush - flush the delayed release
824  * @uncore: the intel_uncore structure
825  * @fw_domains: forcewake domains to flush
826  */
827 void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
828 				  enum forcewake_domains fw_domains)
829 {
830 	struct intel_uncore_forcewake_domain *domain;
831 	unsigned int tmp;
832 
833 	if (!uncore->fw_get_funcs)
834 		return;
835 
836 	fw_domains &= uncore->fw_domains;
837 	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
838 		WRITE_ONCE(domain->active, false);
839 		if (hrtimer_cancel(&domain->timer))
840 			intel_uncore_fw_release_timer(&domain->timer);
841 	}
842 }
843 
844 /**
845  * intel_uncore_forcewake_put__locked - release forcewake domain references
846  * @uncore: the intel_uncore structure
847  * @fw_domains: forcewake domains to put references
848  *
849  * See intel_uncore_forcewake_put(). This variant places the onus
850  * on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
851  */
852 void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
853 					enum forcewake_domains fw_domains)
854 {
855 	lockdep_assert_held(&uncore->lock);
856 
857 	if (!uncore->fw_get_funcs)
858 		return;
859 
860 	__intel_uncore_forcewake_put(uncore, fw_domains, false);
861 }
862 
863 void assert_forcewakes_inactive(struct intel_uncore *uncore)
864 {
865 	if (!uncore->fw_get_funcs)
866 		return;
867 
868 	drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
869 		 "Expected all fw_domains to be inactive, but %08x are still on\n",
870 		 uncore->fw_domains_active);
871 }
872 
873 void assert_forcewakes_active(struct intel_uncore *uncore,
874 			      enum forcewake_domains fw_domains)
875 {
876 	struct intel_uncore_forcewake_domain *domain;
877 	unsigned int tmp;
878 
879 	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
880 		return;
881 
882 	if (!uncore->fw_get_funcs)
883 		return;
884 
885 	spin_lock_irq(&uncore->lock);
886 
887 	assert_rpm_wakelock_held(uncore->rpm);
888 
889 	fw_domains &= uncore->fw_domains;
890 	drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
891 		 "Expected %08x fw_domains to be active, but %08x are off\n",
892 		 fw_domains, fw_domains & ~uncore->fw_domains_active);
893 
894 	/*
895 	 * Check that the caller has an explicit wakeref and we don't mistake
896 	 * it for the auto wakeref.
897 	 */
898 	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
899 		unsigned int actual = READ_ONCE(domain->wake_count);
900 		unsigned int expect = 1;
901 
902 		if (uncore->fw_domains_timer & domain->mask)
903 			expect++; /* pending automatic release */
904 
905 		if (drm_WARN(&uncore->i915->drm, actual < expect,
906 			     "Expected domain %d to be held awake by caller, count=%d\n",
907 			     domain->id, actual))
908 			break;
909 	}
910 
911 	spin_unlock_irq(&uncore->lock);
912 }
913 
914 /*
915  * We give fast paths for the really cool registers.  The second range includes
916  * media domains (and the GSC starting from Xe_LPM+)
917  */
918 #define NEEDS_FORCE_WAKE(reg) ({ \
919 	u32 __reg = (reg); \
920 	__reg < 0x40000 || __reg >= 0x116000; \
921 })
922 
923 static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
924 {
925 	if (offset < entry->start)
926 		return -1;
927 	else if (offset > entry->end)
928 		return 1;
929 	else
930 		return 0;
931 }
932 
933 /* Copied and "macroized" from lib/bsearch.c */
934 #define BSEARCH(key, base, num, cmp) ({                                 \
935 	unsigned int start__ = 0, end__ = (num);                        \
936 	typeof(base) result__ = NULL;                                   \
937 	while (start__ < end__) {                                       \
938 		unsigned int mid__ = start__ + (end__ - start__) / 2;   \
939 		int ret__ = (cmp)((key), (base) + mid__);               \
940 		if (ret__ < 0) {                                        \
941 			end__ = mid__;                                  \
942 		} else if (ret__ > 0) {                                 \
943 			start__ = mid__ + 1;                            \
944 		} else {                                                \
945 			result__ = (base) + mid__;                      \
946 			break;                                          \
947 		}                                                       \
948 	}                                                               \
949 	result__;                                                       \
950 })
951 
952 static enum forcewake_domains
953 find_fw_domain(struct intel_uncore *uncore, u32 offset)
954 {
955 	const struct intel_forcewake_range *entry;
956 
957 	if (IS_GSI_REG(offset))
958 		offset += uncore->gsi_offset;
959 
960 	entry = BSEARCH(offset,
961 			uncore->fw_domains_table,
962 			uncore->fw_domains_table_entries,
963 			fw_range_cmp);
964 
965 	if (!entry)
966 		return 0;
967 
968 	/*
969 	 * The list of FW domains depends on the SKU in gen11+ so we
970 	 * can't determine it statically. We use FORCEWAKE_ALL and
971 	 * translate it here to the list of available domains.
972 	 */
973 	if (entry->domains == FORCEWAKE_ALL)
974 		return uncore->fw_domains;
975 
976 	drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
977 		 "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
978 		 entry->domains & ~uncore->fw_domains, offset);
979 
980 	return entry->domains;
981 }
982 
983 /*
984  * Shadowed register tables describe special register ranges that i915 is
985  * allowed to write to without acquiring forcewake.  If these registers' power
986  * wells are down, the hardware will save values written by i915 to a shadow
987  * copy and automatically transfer them into the real register the next time
988  * the power well is woken up.  Shadowing only applies to writes; forcewake
989  * must still be acquired when reading from registers in these ranges.
990  *
991  * The documentation for shadowed registers is somewhat spotty on older
992  * platforms.  However, a register missing from these lists is non-fatal; it just
993  * means we'll wake up the hardware for some register accesses where we didn't
994  * really need to.
995  *
996  * The ranges listed in these tables must be sorted by offset.
997  *
998  * When adding new tables here, please also add them to
999  * intel_shadow_table_check() in selftests/intel_uncore.c so that they will be
1000  * scanned for obvious mistakes or typos by the selftests.
1001  */
1002 
1003 static const struct i915_range gen8_shadowed_regs[] = {
1004 	{ .start =  0x2030, .end =  0x2030 },
1005 	{ .start =  0xA008, .end =  0xA00C },
1006 	{ .start = 0x12030, .end = 0x12030 },
1007 	{ .start = 0x1a030, .end = 0x1a030 },
1008 	{ .start = 0x22030, .end = 0x22030 },
1009 };
1010 
1011 static const struct i915_range gen11_shadowed_regs[] = {
1012 	{ .start =   0x2030, .end =   0x2030 },
1013 	{ .start =   0x2550, .end =   0x2550 },
1014 	{ .start =   0xA008, .end =   0xA00C },
1015 	{ .start =  0x22030, .end =  0x22030 },
1016 	{ .start =  0x22230, .end =  0x22230 },
1017 	{ .start =  0x22510, .end =  0x22550 },
1018 	{ .start = 0x1C0030, .end = 0x1C0030 },
1019 	{ .start = 0x1C0230, .end = 0x1C0230 },
1020 	{ .start = 0x1C0510, .end = 0x1C0550 },
1021 	{ .start = 0x1C4030, .end = 0x1C4030 },
1022 	{ .start = 0x1C4230, .end = 0x1C4230 },
1023 	{ .start = 0x1C4510, .end = 0x1C4550 },
1024 	{ .start = 0x1C8030, .end = 0x1C8030 },
1025 	{ .start = 0x1C8230, .end = 0x1C8230 },
1026 	{ .start = 0x1C8510, .end = 0x1C8550 },
1027 	{ .start = 0x1D0030, .end = 0x1D0030 },
1028 	{ .start = 0x1D0230, .end = 0x1D0230 },
1029 	{ .start = 0x1D0510, .end = 0x1D0550 },
1030 	{ .start = 0x1D4030, .end = 0x1D4030 },
1031 	{ .start = 0x1D4230, .end = 0x1D4230 },
1032 	{ .start = 0x1D4510, .end = 0x1D4550 },
1033 	{ .start = 0x1D8030, .end = 0x1D8030 },
1034 	{ .start = 0x1D8230, .end = 0x1D8230 },
1035 	{ .start = 0x1D8510, .end = 0x1D8550 },
1036 };
1037 
1038 static const struct i915_range gen12_shadowed_regs[] = {
1039 	{ .start =   0x2030, .end =   0x2030 },
1040 	{ .start =   0x2510, .end =   0x2550 },
1041 	{ .start =   0xA008, .end =   0xA00C },
1042 	{ .start =   0xA188, .end =   0xA188 },
1043 	{ .start =   0xA278, .end =   0xA278 },
1044 	{ .start =   0xA540, .end =   0xA56C },
1045 	{ .start =   0xC4C8, .end =   0xC4C8 },
1046 	{ .start =   0xC4D4, .end =   0xC4D4 },
1047 	{ .start =   0xC600, .end =   0xC600 },
1048 	{ .start =  0x22030, .end =  0x22030 },
1049 	{ .start =  0x22510, .end =  0x22550 },
1050 	{ .start = 0x1C0030, .end = 0x1C0030 },
1051 	{ .start = 0x1C0510, .end = 0x1C0550 },
1052 	{ .start = 0x1C4030, .end = 0x1C4030 },
1053 	{ .start = 0x1C4510, .end = 0x1C4550 },
1054 	{ .start = 0x1C8030, .end = 0x1C8030 },
1055 	{ .start = 0x1C8510, .end = 0x1C8550 },
1056 	{ .start = 0x1D0030, .end = 0x1D0030 },
1057 	{ .start = 0x1D0510, .end = 0x1D0550 },
1058 	{ .start = 0x1D4030, .end = 0x1D4030 },
1059 	{ .start = 0x1D4510, .end = 0x1D4550 },
1060 	{ .start = 0x1D8030, .end = 0x1D8030 },
1061 	{ .start = 0x1D8510, .end = 0x1D8550 },
1062 
1063 	/*
1064 	 * The rest of these ranges are specific to Xe_HP and beyond, but
1065 	 * are reserved/unused ranges on earlier gen12 platforms, so they can
1066 	 * be safely added to the gen12 table.
1067 	 */
1068 	{ .start = 0x1E0030, .end = 0x1E0030 },
1069 	{ .start = 0x1E0510, .end = 0x1E0550 },
1070 	{ .start = 0x1E4030, .end = 0x1E4030 },
1071 	{ .start = 0x1E4510, .end = 0x1E4550 },
1072 	{ .start = 0x1E8030, .end = 0x1E8030 },
1073 	{ .start = 0x1E8510, .end = 0x1E8550 },
1074 	{ .start = 0x1F0030, .end = 0x1F0030 },
1075 	{ .start = 0x1F0510, .end = 0x1F0550 },
1076 	{ .start = 0x1F4030, .end = 0x1F4030 },
1077 	{ .start = 0x1F4510, .end = 0x1F4550 },
1078 	{ .start = 0x1F8030, .end = 0x1F8030 },
1079 	{ .start = 0x1F8510, .end = 0x1F8550 },
1080 };
1081 
1082 static const struct i915_range dg2_shadowed_regs[] = {
1083 	{ .start =   0x2030, .end =   0x2030 },
1084 	{ .start =   0x2510, .end =   0x2550 },
1085 	{ .start =   0xA008, .end =   0xA00C },
1086 	{ .start =   0xA188, .end =   0xA188 },
1087 	{ .start =   0xA278, .end =   0xA278 },
1088 	{ .start =   0xA540, .end =   0xA56C },
1089 	{ .start =   0xC4C8, .end =   0xC4C8 },
1090 	{ .start =   0xC4E0, .end =   0xC4E0 },
1091 	{ .start =   0xC600, .end =   0xC600 },
1092 	{ .start =   0xC658, .end =   0xC658 },
1093 	{ .start =  0x22030, .end =  0x22030 },
1094 	{ .start =  0x22510, .end =  0x22550 },
1095 	{ .start = 0x1C0030, .end = 0x1C0030 },
1096 	{ .start = 0x1C0510, .end = 0x1C0550 },
1097 	{ .start = 0x1C4030, .end = 0x1C4030 },
1098 	{ .start = 0x1C4510, .end = 0x1C4550 },
1099 	{ .start = 0x1C8030, .end = 0x1C8030 },
1100 	{ .start = 0x1C8510, .end = 0x1C8550 },
1101 	{ .start = 0x1D0030, .end = 0x1D0030 },
1102 	{ .start = 0x1D0510, .end = 0x1D0550 },
1103 	{ .start = 0x1D4030, .end = 0x1D4030 },
1104 	{ .start = 0x1D4510, .end = 0x1D4550 },
1105 	{ .start = 0x1D8030, .end = 0x1D8030 },
1106 	{ .start = 0x1D8510, .end = 0x1D8550 },
1107 	{ .start = 0x1E0030, .end = 0x1E0030 },
1108 	{ .start = 0x1E0510, .end = 0x1E0550 },
1109 	{ .start = 0x1E4030, .end = 0x1E4030 },
1110 	{ .start = 0x1E4510, .end = 0x1E4550 },
1111 	{ .start = 0x1E8030, .end = 0x1E8030 },
1112 	{ .start = 0x1E8510, .end = 0x1E8550 },
1113 	{ .start = 0x1F0030, .end = 0x1F0030 },
1114 	{ .start = 0x1F0510, .end = 0x1F0550 },
1115 	{ .start = 0x1F4030, .end = 0x1F4030 },
1116 	{ .start = 0x1F4510, .end = 0x1F4550 },
1117 	{ .start = 0x1F8030, .end = 0x1F8030 },
1118 	{ .start = 0x1F8510, .end = 0x1F8550 },
1119 };
1120 
1121 static const struct i915_range mtl_shadowed_regs[] = {
1122 	{ .start =   0x2030, .end =   0x2030 },
1123 	{ .start =   0x2510, .end =   0x2550 },
1124 	{ .start =   0xA008, .end =   0xA00C },
1125 	{ .start =   0xA188, .end =   0xA188 },
1126 	{ .start =   0xA278, .end =   0xA278 },
1127 	{ .start =   0xA540, .end =   0xA56C },
1128 	{ .start =   0xC050, .end =   0xC050 },
1129 	{ .start =   0xC340, .end =   0xC340 },
1130 	{ .start =   0xC4C8, .end =   0xC4C8 },
1131 	{ .start =   0xC4E0, .end =   0xC4E0 },
1132 	{ .start =   0xC600, .end =   0xC600 },
1133 	{ .start =   0xC658, .end =   0xC658 },
1134 	{ .start =   0xCFD4, .end =   0xCFDC },
1135 	{ .start =  0x22030, .end =  0x22030 },
1136 	{ .start =  0x22510, .end =  0x22550 },
1137 };
1138 
1139 static const struct i915_range xelpmp_shadowed_regs[] = {
1140 	{ .start = 0x1C0030, .end = 0x1C0030 },
1141 	{ .start = 0x1C0510, .end = 0x1C0550 },
1142 	{ .start = 0x1C8030, .end = 0x1C8030 },
1143 	{ .start = 0x1C8510, .end = 0x1C8550 },
1144 	{ .start = 0x1D0030, .end = 0x1D0030 },
1145 	{ .start = 0x1D0510, .end = 0x1D0550 },
1146 	{ .start = 0x38A008, .end = 0x38A00C },
1147 	{ .start = 0x38A188, .end = 0x38A188 },
1148 	{ .start = 0x38A278, .end = 0x38A278 },
1149 	{ .start = 0x38A540, .end = 0x38A56C },
1150 	{ .start = 0x38A618, .end = 0x38A618 },
1151 	{ .start = 0x38C050, .end = 0x38C050 },
1152 	{ .start = 0x38C340, .end = 0x38C340 },
1153 	{ .start = 0x38C4C8, .end = 0x38C4C8 },
1154 	{ .start = 0x38C4E0, .end = 0x38C4E4 },
1155 	{ .start = 0x38C600, .end = 0x38C600 },
1156 	{ .start = 0x38C658, .end = 0x38C658 },
1157 	{ .start = 0x38CFD4, .end = 0x38CFDC },
1158 };
1159 
1160 static int mmio_range_cmp(u32 key, const struct i915_range *range)
1161 {
1162 	if (key < range->start)
1163 		return -1;
1164 	else if (key > range->end)
1165 		return 1;
1166 	else
1167 		return 0;
1168 }
1169 
1170 static bool is_shadowed(struct intel_uncore *uncore, u32 offset)
1171 {
1172 	if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table))
1173 		return false;
1174 
1175 	if (IS_GSI_REG(offset))
1176 		offset += uncore->gsi_offset;
1177 
1178 	return BSEARCH(offset,
1179 		       uncore->shadowed_reg_table,
1180 		       uncore->shadowed_reg_table_entries,
1181 		       mmio_range_cmp);
1182 }
1183 
1184 static enum forcewake_domains
1185 gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
1186 {
1187 	return FORCEWAKE_RENDER;
1188 }
1189 
1190 #define __fwtable_reg_read_fw_domains(uncore, offset) \
1191 ({ \
1192 	enum forcewake_domains __fwd = 0; \
1193 	if (NEEDS_FORCE_WAKE((offset))) \
1194 		__fwd = find_fw_domain(uncore, offset); \
1195 	__fwd; \
1196 })
1197 
1198 #define __fwtable_reg_write_fw_domains(uncore, offset) \
1199 ({ \
1200 	enum forcewake_domains __fwd = 0; \
1201 	const u32 __offset = (offset); \
1202 	if (NEEDS_FORCE_WAKE((__offset)) && !is_shadowed(uncore, __offset)) \
1203 		__fwd = find_fw_domain(uncore, __offset); \
1204 	__fwd; \
1205 })
1206 
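/*
 * Worked example (illustrative, based on the gen12 tables in this file):
 * offset 0x2030 sits in a render forcewake range and is also listed in
 * gen12_shadowed_regs, so the read and write lookups disagree on purpose:
 *
 *	__fwtable_reg_read_fw_domains(uncore, 0x2030)   -> FORCEWAKE_RENDER
 *	__fwtable_reg_write_fw_domains(uncore, 0x2030)  -> 0 (write is shadowed)
 */
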
1207 #define GEN_FW_RANGE(s, e, d) \
1208 	{ .start = (s), .end = (e), .domains = (d) }
1209 
1210 /*
1211  * All platforms' forcewake tables below must be sorted by offset ranges.
1212  * Furthermore, new forcewake tables added should be "watertight" and have
1213  * no gaps between ranges.
1214  *
1215  * When there are multiple consecutive ranges listed in the bspec with
1216  * the same forcewake domain, it is customary to combine them into a single
1217  * row in the tables below to keep the tables small and lookups fast.
1218  * Likewise, reserved/unused ranges may be combined with the preceding and/or
1219  * following ranges since the driver will never be making MMIO accesses in
1220  * those ranges.
1221  *
1222  * For example, if the bspec were to list:
1223  *
1224  *    ...
1225  *    0x1000 - 0x1fff:  GT
1226  *    0x2000 - 0x2cff:  GT
1227  *    0x2d00 - 0x2fff:  unused/reserved
1228  *    0x3000 - 0xffff:  GT
1229  *    ...
1230  *
1231  * these could all be represented by a single line in the code:
1232  *
1233  *   GEN_FW_RANGE(0x1000, 0xffff, FORCEWAKE_GT)
1234  *
1235  * When adding new forcewake tables here, please also add them to
1236  * intel_uncore_mock_selftests in selftests/intel_uncore.c so that they will be
1237  * scanned for obvious mistakes or typos by the selftests.
1238  */
1239 
1240 static const struct intel_forcewake_range __gen6_fw_ranges[] = {
1241 	GEN_FW_RANGE(0x0, 0x3ffff, FORCEWAKE_RENDER),
1242 };
1243 
1244 static const struct intel_forcewake_range __vlv_fw_ranges[] = {
1245 	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
1246 	GEN_FW_RANGE(0x5000, 0x7fff, FORCEWAKE_RENDER),
1247 	GEN_FW_RANGE(0xb000, 0x11fff, FORCEWAKE_RENDER),
1248 	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
1249 	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_MEDIA),
1250 	GEN_FW_RANGE(0x2e000, 0x2ffff, FORCEWAKE_RENDER),
1251 	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
1252 };
1253 
1254 static const struct intel_forcewake_range __chv_fw_ranges[] = {
1255 	GEN_FW_RANGE(0x2000, 0x3fff, FORCEWAKE_RENDER),
1256 	GEN_FW_RANGE(0x4000, 0x4fff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1257 	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1258 	GEN_FW_RANGE(0x8000, 0x82ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1259 	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1260 	GEN_FW_RANGE(0x8500, 0x85ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1261 	GEN_FW_RANGE(0x8800, 0x88ff, FORCEWAKE_MEDIA),
1262 	GEN_FW_RANGE(0x9000, 0xafff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1263 	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
1264 	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
1265 	GEN_FW_RANGE(0xe000, 0xe7ff, FORCEWAKE_RENDER),
1266 	GEN_FW_RANGE(0xf000, 0xffff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1267 	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
1268 	GEN_FW_RANGE(0x1a000, 0x1bfff, FORCEWAKE_MEDIA),
1269 	GEN_FW_RANGE(0x1e800, 0x1e9ff, FORCEWAKE_MEDIA),
1270 	GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
1271 };
1272 
1273 static const struct intel_forcewake_range __gen9_fw_ranges[] = {
1274 	GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_GT),
1275 	GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */
1276 	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1277 	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
1278 	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1279 	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
1280 	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1281 	GEN_FW_RANGE(0x8000, 0x812f, FORCEWAKE_GT),
1282 	GEN_FW_RANGE(0x8130, 0x813f, FORCEWAKE_MEDIA),
1283 	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1284 	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
1285 	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1286 	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
1287 	GEN_FW_RANGE(0x8800, 0x89ff, FORCEWAKE_MEDIA),
1288 	GEN_FW_RANGE(0x8a00, 0x8bff, FORCEWAKE_GT),
1289 	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
1290 	GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_GT),
1291 	GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_RENDER | FORCEWAKE_MEDIA),
1292 	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
1293 	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
1294 	GEN_FW_RANGE(0xb480, 0xcfff, FORCEWAKE_GT),
1295 	GEN_FW_RANGE(0xd000, 0xd7ff, FORCEWAKE_MEDIA),
1296 	GEN_FW_RANGE(0xd800, 0xdfff, FORCEWAKE_GT),
1297 	GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER),
1298 	GEN_FW_RANGE(0xe900, 0x11fff, FORCEWAKE_GT),
1299 	GEN_FW_RANGE(0x12000, 0x13fff, FORCEWAKE_MEDIA),
1300 	GEN_FW_RANGE(0x14000, 0x19fff, FORCEWAKE_GT),
1301 	GEN_FW_RANGE(0x1a000, 0x1e9ff, FORCEWAKE_MEDIA),
1302 	GEN_FW_RANGE(0x1ea00, 0x243ff, FORCEWAKE_GT),
1303 	GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER),
1304 	GEN_FW_RANGE(0x24800, 0x2ffff, FORCEWAKE_GT),
1305 	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
1306 };
1307 
1308 static const struct intel_forcewake_range __gen11_fw_ranges[] = {
1309 	GEN_FW_RANGE(0x0, 0x1fff, 0), /* uncore range */
1310 	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1311 	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
1312 	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1313 	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT),
1314 	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1315 	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
1316 	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1317 	GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_GT),
1318 	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1319 	GEN_FW_RANGE(0x8500, 0x87ff, FORCEWAKE_GT),
1320 	GEN_FW_RANGE(0x8800, 0x8bff, 0),
1321 	GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER),
1322 	GEN_FW_RANGE(0x8d00, 0x94cf, FORCEWAKE_GT),
1323 	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1324 	GEN_FW_RANGE(0x9560, 0x95ff, 0),
1325 	GEN_FW_RANGE(0x9600, 0xafff, FORCEWAKE_GT),
1326 	GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER),
1327 	GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_GT),
1328 	GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER),
1329 	GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_GT),
1330 	GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER),
1331 	GEN_FW_RANGE(0x1a000, 0x23fff, FORCEWAKE_GT),
1332 	GEN_FW_RANGE(0x24000, 0x2407f, 0),
1333 	GEN_FW_RANGE(0x24080, 0x2417f, FORCEWAKE_GT),
1334 	GEN_FW_RANGE(0x24180, 0x242ff, FORCEWAKE_RENDER),
1335 	GEN_FW_RANGE(0x24300, 0x243ff, FORCEWAKE_GT),
1336 	GEN_FW_RANGE(0x24400, 0x24fff, FORCEWAKE_RENDER),
1337 	GEN_FW_RANGE(0x25000, 0x3ffff, FORCEWAKE_GT),
1338 	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1339 	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0),
1340 	GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
1341 	GEN_FW_RANGE(0x1c8000, 0x1cffff, FORCEWAKE_MEDIA_VEBOX0),
1342 	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2),
1343 	GEN_FW_RANGE(0x1d4000, 0x1dbfff, 0)
1344 };
1345 
1346 static const struct intel_forcewake_range __gen12_fw_ranges[] = {
1347 	GEN_FW_RANGE(0x0, 0x1fff, 0), /*
1348 		0x0   -  0xaff: reserved
1349 		0xb00 - 0x1fff: always on */
1350 	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1351 	GEN_FW_RANGE(0x2700, 0x27ff, FORCEWAKE_GT),
1352 	GEN_FW_RANGE(0x2800, 0x2aff, FORCEWAKE_RENDER),
1353 	GEN_FW_RANGE(0x2b00, 0x2fff, FORCEWAKE_GT),
1354 	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1355 	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
1356 		0x4000 - 0x48ff: gt
1357 		0x4900 - 0x51ff: reserved */
1358 	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
1359 		0x5200 - 0x53ff: render
1360 		0x5400 - 0x54ff: reserved
1361 		0x5500 - 0x7fff: render */
1362 	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
1363 	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1364 	GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
1365 		0x8160 - 0x817f: reserved
1366 		0x8180 - 0x81ff: always on */
1367 	GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
1368 	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1369 	GEN_FW_RANGE(0x8500, 0x94cf, FORCEWAKE_GT), /*
1370 		0x8500 - 0x87ff: gt
1371 		0x8800 - 0x8fff: reserved
1372 		0x9000 - 0x947f: gt
1373 		0x9480 - 0x94cf: reserved */
1374 	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1375 	GEN_FW_RANGE(0x9560, 0x97ff, 0), /*
1376 		0x9560 - 0x95ff: always on
1377 		0x9600 - 0x97ff: reserved */
1378 	GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_GT),
1379 	GEN_FW_RANGE(0xb000, 0xb3ff, FORCEWAKE_RENDER),
1380 	GEN_FW_RANGE(0xb400, 0xcfff, FORCEWAKE_GT), /*
1381 		0xb400 - 0xbf7f: gt
1382 		0xb480 - 0xbfff: reserved
1383 		0xc000 - 0xcfff: gt */
1384 	GEN_FW_RANGE(0xd000, 0xd7ff, 0),
1385 	GEN_FW_RANGE(0xd800, 0xd8ff, FORCEWAKE_RENDER),
1386 	GEN_FW_RANGE(0xd900, 0xdbff, FORCEWAKE_GT),
1387 	GEN_FW_RANGE(0xdc00, 0xefff, FORCEWAKE_RENDER), /*
1388 		0xdc00 - 0xddff: render
1389 		0xde00 - 0xde7f: reserved
1390 		0xde80 - 0xe8ff: render
1391 		0xe900 - 0xefff: reserved */
1392 	GEN_FW_RANGE(0xf000, 0x147ff, FORCEWAKE_GT), /*
1393 		 0xf000 - 0xffff: gt
1394 		0x10000 - 0x147ff: reserved */
1395 	GEN_FW_RANGE(0x14800, 0x1ffff, FORCEWAKE_RENDER), /*
1396 		0x14800 - 0x14fff: render
1397 		0x15000 - 0x16dff: reserved
1398 		0x16e00 - 0x1bfff: render
1399 		0x1c000 - 0x1ffff: reserved */
1400 	GEN_FW_RANGE(0x20000, 0x20fff, FORCEWAKE_MEDIA_VDBOX0),
1401 	GEN_FW_RANGE(0x21000, 0x21fff, FORCEWAKE_MEDIA_VDBOX2),
1402 	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
1403 	GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
1404 		0x24000 - 0x2407f: always on
1405 		0x24080 - 0x2417f: reserved */
1406 	GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
1407 		0x24180 - 0x241ff: gt
1408 		0x24200 - 0x249ff: reserved */
1409 	GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
1410 		0x24a00 - 0x24a7f: render
1411 		0x24a80 - 0x251ff: reserved */
1412 	GEN_FW_RANGE(0x25200, 0x255ff, FORCEWAKE_GT), /*
1413 		0x25200 - 0x252ff: gt
1414 		0x25300 - 0x255ff: reserved */
1415 	GEN_FW_RANGE(0x25600, 0x2567f, FORCEWAKE_MEDIA_VDBOX0),
1416 	GEN_FW_RANGE(0x25680, 0x259ff, FORCEWAKE_MEDIA_VDBOX2), /*
1417 		0x25680 - 0x256ff: VD2
1418 		0x25700 - 0x259ff: reserved */
1419 	GEN_FW_RANGE(0x25a00, 0x25a7f, FORCEWAKE_MEDIA_VDBOX0),
1420 	GEN_FW_RANGE(0x25a80, 0x2ffff, FORCEWAKE_MEDIA_VDBOX2), /*
1421 		0x25a80 - 0x25aff: VD2
1422 		0x25b00 - 0x2ffff: reserved */
1423 	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
1424 	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1425 	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
1426 		0x1c0000 - 0x1c2bff: VD0
1427 		0x1c2c00 - 0x1c2cff: reserved
1428 		0x1c2d00 - 0x1c2dff: VD0
1429 		0x1c2e00 - 0x1c3eff: reserved
1430 		0x1c3f00 - 0x1c3fff: VD0 */
1431 	GEN_FW_RANGE(0x1c4000, 0x1c7fff, 0),
1432 	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
1433 		0x1c8000 - 0x1ca0ff: VE0
1434 		0x1ca100 - 0x1cbeff: reserved
1435 		0x1cbf00 - 0x1cbfff: VE0 */
1436 	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
1437 		0x1cc000 - 0x1ccfff: VD0
1438 		0x1cd000 - 0x1cffff: reserved */
1439 	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
1440 		0x1d0000 - 0x1d2bff: VD2
1441 		0x1d2c00 - 0x1d2cff: reserved
1442 		0x1d2d00 - 0x1d2dff: VD2
1443 		0x1d2e00 - 0x1d3eff: reserved
1444 		0x1d3f00 - 0x1d3fff: VD2 */
1445 };
1446 
1447 static const struct intel_forcewake_range __dg2_fw_ranges[] = {
1448 	GEN_FW_RANGE(0x0, 0x1fff, 0), /*
1449 		  0x0 -  0xaff: reserved
1450 		0xb00 - 0x1fff: always on */
1451 	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1452 	GEN_FW_RANGE(0x2700, 0x4aff, FORCEWAKE_GT),
1453 	GEN_FW_RANGE(0x4b00, 0x51ff, 0), /*
1454 		0x4b00 - 0x4fff: reserved
1455 		0x5000 - 0x51ff: always on */
1456 	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),
1457 	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
1458 	GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),
1459 	GEN_FW_RANGE(0x8160, 0x81ff, 0), /*
1460 		0x8160 - 0x817f: reserved
1461 		0x8180 - 0x81ff: always on */
1462 	GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),
1463 	GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),
1464 	GEN_FW_RANGE(0x8500, 0x8cff, FORCEWAKE_GT), /*
1465 		0x8500 - 0x87ff: gt
1466 		0x8800 - 0x8c7f: reserved
1467 		0x8c80 - 0x8cff: gt (DG2 only) */
1468 	GEN_FW_RANGE(0x8d00, 0x8fff, FORCEWAKE_RENDER), /*
1469 		0x8d00 - 0x8dff: render (DG2 only)
1470 		0x8e00 - 0x8fff: reserved */
1471 	GEN_FW_RANGE(0x9000, 0x94cf, FORCEWAKE_GT), /*
1472 		0x9000 - 0x947f: gt
1473 		0x9480 - 0x94cf: reserved */
1474 	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1475 	GEN_FW_RANGE(0x9560, 0x967f, 0), /*
1476 		0x9560 - 0x95ff: always on
1477 		0x9600 - 0x967f: reserved */
1478 	GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /*
1479 		0x9680 - 0x96ff: render
1480 		0x9700 - 0x97ff: reserved */
1481 	GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*
1482 		0x9800 - 0xb4ff: gt
1483 		0xb500 - 0xbfff: reserved
1484 		0xc000 - 0xcfff: gt */
1485 	GEN_FW_RANGE(0xd000, 0xd7ff, 0),
1486 	GEN_FW_RANGE(0xd800, 0xd87f, FORCEWAKE_RENDER),
1487 	GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT),
1488 	GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
1489 	GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*
1490 		0xdd00 - 0xddff: gt
1491 		0xde00 - 0xde7f: reserved */
1492 	GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*
1493 		0xde80 - 0xdfff: render
1494 		0xe000 - 0xe0ff: reserved
1495 		0xe100 - 0xe8ff: render */
1496 	GEN_FW_RANGE(0xe900, 0xffff, FORCEWAKE_GT), /*
1497 		0xe900 - 0xe9ff: gt
1498 		0xea00 - 0xefff: reserved
1499 		0xf000 - 0xffff: gt */
1500 	GEN_FW_RANGE(0x10000, 0x12fff, 0), /*
1501 		0x10000 - 0x11fff: reserved
1502 		0x12000 - 0x127ff: always on
1503 		0x12800 - 0x12fff: reserved */
1504 	GEN_FW_RANGE(0x13000, 0x131ff, FORCEWAKE_MEDIA_VDBOX0),
1505 	GEN_FW_RANGE(0x13200, 0x147ff, FORCEWAKE_MEDIA_VDBOX2), /*
1506 		0x13200 - 0x133ff: VD2 (DG2 only)
1507 		0x13400 - 0x147ff: reserved */
1508 	GEN_FW_RANGE(0x14800, 0x14fff, FORCEWAKE_RENDER),
1509 	GEN_FW_RANGE(0x15000, 0x16dff, FORCEWAKE_GT), /*
1510 		0x15000 - 0x15fff: gt (DG2 only)
1511 		0x16000 - 0x16dff: reserved */
1512 	GEN_FW_RANGE(0x16e00, 0x21fff, FORCEWAKE_RENDER), /*
1513 		0x16e00 - 0x1ffff: render
1514 		0x20000 - 0x21fff: reserved */
1515 	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
1516 	GEN_FW_RANGE(0x24000, 0x2417f, 0), /*
1517 		0x24000 - 0x2407f: always on
1518 		0x24080 - 0x2417f: reserved */
1519 	GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*
1520 		0x24180 - 0x241ff: gt
1521 		0x24200 - 0x249ff: reserved */
1522 	GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*
1523 		0x24a00 - 0x24a7f: render
1524 		0x24a80 - 0x251ff: reserved */
1525 	GEN_FW_RANGE(0x25200, 0x25fff, FORCEWAKE_GT), /*
1526 		0x25200 - 0x252ff: gt
1527 		0x25300 - 0x25fff: reserved */
1528 	GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /*
1529 		0x26000 - 0x27fff: render
1530 		0x28000 - 0x29fff: reserved
1531 		0x2a000 - 0x2ffff: undocumented */
1532 	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
1533 	GEN_FW_RANGE(0x40000, 0x1bffff, 0),
1534 	GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*
1535 		0x1c0000 - 0x1c2bff: VD0
1536 		0x1c2c00 - 0x1c2cff: reserved
1537 		0x1c2d00 - 0x1c2dff: VD0
1538 		0x1c2e00 - 0x1c3eff: VD0
1539 		0x1c3f00 - 0x1c3fff: VD0 */
1540 	GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), /*
1541 		0x1c4000 - 0x1c6bff: VD1
1542 		0x1c6c00 - 0x1c6cff: reserved
1543 		0x1c6d00 - 0x1c6dff: VD1
1544 		0x1c6e00 - 0x1c7fff: reserved */
1545 	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
1546 		0x1c8000 - 0x1ca0ff: VE0
1547 		0x1ca100 - 0x1cbfff: reserved */
1548 	GEN_FW_RANGE(0x1cc000, 0x1ccfff, FORCEWAKE_MEDIA_VDBOX0),
1549 	GEN_FW_RANGE(0x1cd000, 0x1cdfff, FORCEWAKE_MEDIA_VDBOX2),
1550 	GEN_FW_RANGE(0x1ce000, 0x1cefff, FORCEWAKE_MEDIA_VDBOX4),
1551 	GEN_FW_RANGE(0x1cf000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX6),
1552 	GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*
1553 		0x1d0000 - 0x1d2bff: VD2
1554 		0x1d2c00 - 0x1d2cff: reserved
1555 		0x1d2d00 - 0x1d2dff: VD2
1556 		0x1d2e00 - 0x1d3dff: VD2
1557 		0x1d3e00 - 0x1d3eff: reserved
1558 		0x1d3f00 - 0x1d3fff: VD2 */
1559 	GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), /*
1560 		0x1d4000 - 0x1d6bff: VD3
1561 		0x1d6c00 - 0x1d6cff: reserved
1562 		0x1d6d00 - 0x1d6dff: VD3
1563 		0x1d6e00 - 0x1d7fff: reserved */
1564 	GEN_FW_RANGE(0x1d8000, 0x1dffff, FORCEWAKE_MEDIA_VEBOX1), /*
1565 		0x1d8000 - 0x1da0ff: VE1
1566 		0x1da100 - 0x1dffff: reserved */
1567 	GEN_FW_RANGE(0x1e0000, 0x1e3fff, FORCEWAKE_MEDIA_VDBOX4), /*
1568 		0x1e0000 - 0x1e2bff: VD4
1569 		0x1e2c00 - 0x1e2cff: reserved
1570 		0x1e2d00 - 0x1e2dff: VD4
1571 		0x1e2e00 - 0x1e3eff: reserved
1572 		0x1e3f00 - 0x1e3fff: VD4 */
1573 	GEN_FW_RANGE(0x1e4000, 0x1e7fff, FORCEWAKE_MEDIA_VDBOX5), /*
1574 		0x1e4000 - 0x1e6bff: VD5
1575 		0x1e6c00 - 0x1e6cff: reserved
1576 		0x1e6d00 - 0x1e6dff: VD5
1577 		0x1e6e00 - 0x1e7fff: reserved */
1578 	GEN_FW_RANGE(0x1e8000, 0x1effff, FORCEWAKE_MEDIA_VEBOX2), /*
1579 		0x1e8000 - 0x1ea0ff: VE2
1580 		0x1ea100 - 0x1effff: reserved */
1581 	GEN_FW_RANGE(0x1f0000, 0x1f3fff, FORCEWAKE_MEDIA_VDBOX6), /*
1582 		0x1f0000 - 0x1f2bff: VD6
1583 		0x1f2c00 - 0x1f2cff: reserved
1584 		0x1f2d00 - 0x1f2dff: VD6
1585 		0x1f2e00 - 0x1f3eff: reserved
1586 		0x1f3f00 - 0x1f3fff: VD6 */
1587 	GEN_FW_RANGE(0x1f4000, 0x1f7fff, FORCEWAKE_MEDIA_VDBOX7), /*
1588 		0x1f4000 - 0x1f6bff: VD7
1589 		0x1f6c00 - 0x1f6cff: reserved
1590 		0x1f6d00 - 0x1f6dff: VD7
1591 		0x1f6e00 - 0x1f7fff: reserved */
1592 	GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3),
1593 };
1594 
1595 static const struct intel_forcewake_range __mtl_fw_ranges[] = {
1596 	GEN_FW_RANGE(0x0, 0xaff, 0),
1597 	GEN_FW_RANGE(0xb00, 0xbff, FORCEWAKE_GT),
1598 	GEN_FW_RANGE(0xc00, 0xfff, 0),
1599 	GEN_FW_RANGE(0x1000, 0x1fff, FORCEWAKE_GT),
1600 	GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),
1601 	GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_GT),
1602 	GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER),
1603 	GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_GT), /*
1604 		0x4000 - 0x48ff: render
1605 		0x4900 - 0x51ff: reserved */
1606 	GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), /*
1607 		0x5200 - 0x53ff: render
1608 		0x5400 - 0x54ff: reserved
1609 		0x5500 - 0x7fff: render */
1610 	GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),
1611 	GEN_FW_RANGE(0x8140, 0x817f, FORCEWAKE_RENDER), /*
1612 		0x8140 - 0x815f: render
1613 		0x8160 - 0x817f: reserved */
1614 	GEN_FW_RANGE(0x8180, 0x81ff, 0),
1615 	GEN_FW_RANGE(0x8200, 0x94cf, FORCEWAKE_GT), /*
1616 		0x8200 - 0x87ff: gt
1617 		0x8800 - 0x8dff: reserved
1618 		0x8e00 - 0x8f7f: gt
1619 		0x8f80 - 0x8fff: reserved
1620 		0x9000 - 0x947f: gt
1621 		0x9480 - 0x94cf: reserved */
1622 	GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),
1623 	GEN_FW_RANGE(0x9560, 0x967f, 0), /*
1624 		0x9560 - 0x95ff: always on
1625 		0x9600 - 0x967f: reserved */
1626 	GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /*
1627 		0x9680 - 0x96ff: render
1628 		0x9700 - 0x97ff: reserved */
1629 	GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*
1630 		0x9800 - 0xb4ff: gt
1631 		0xb500 - 0xbfff: reserved
1632 		0xc000 - 0xcfff: gt */
1633 	GEN_FW_RANGE(0xd000, 0xd7ff, 0), /*
1634 		0xd000 - 0xd3ff: always on
1635 		0xd400 - 0xd7ff: reserved */
1636 	GEN_FW_RANGE(0xd800, 0xd87f, FORCEWAKE_RENDER),
1637 	GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT),
1638 	GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),
1639 	GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*
1640 		0xdd00 - 0xddff: gt
1641 		0xde00 - 0xde7f: reserved */
1642 	GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*
1643 		0xde80 - 0xdfff: render
1644 		0xe000 - 0xe0ff: reserved
1645 		0xe100 - 0xe8ff: render */
1646 	GEN_FW_RANGE(0xe900, 0xe9ff, FORCEWAKE_GT),
1647 	GEN_FW_RANGE(0xea00, 0x147ff, 0), /*
1648 		 0xea00 - 0x11fff: reserved
1649 		0x12000 - 0x127ff: always on
1650 		0x12800 - 0x147ff: reserved */
1651 	GEN_FW_RANGE(0x14800, 0x19fff, FORCEWAKE_GT), /*
1652 		0x14800 - 0x153ff: gt
1653 		0x15400 - 0x19fff: reserved */
1654 	GEN_FW_RANGE(0x1a000, 0x21fff, FORCEWAKE_RENDER), /*
1655 		0x1a000 - 0x1bfff: render
1656 		0x1c000 - 0x21fff: reserved */
1657 	GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),
1658 	GEN_FW_RANGE(0x24000, 0x2ffff, 0), /*
1659 		0x24000 - 0x2407f: always on
1660 		0x24080 - 0x2ffff: reserved */
1661 	GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),
1662 	GEN_FW_RANGE(0x40000, 0x1901ef, 0),
1663 	GEN_FW_RANGE(0x1901f0, 0x1901f3, FORCEWAKE_GT)
1664 		/* FIXME: WA to wake GT while triggering H2G */
1665 };
1666 
1667 /*
1668  * Note that the register ranges here are the final offsets after
1669  * translation of the GSI block to the 0x380000 offset.
1670  *
1671  * NOTE:  There are a couple of MCR ranges near the bottom of this table
1672  * that need to power up either VD0 or VD2 depending on which replicated
1673  * instance of the register we're trying to access.  Our forcewake logic
1674  * at the moment doesn't have a good way to take steering into consideration,
1675  * and the driver doesn't even access any registers in those ranges today,
1676  * so for now we just mark those ranges as FORCEWAKE_ALL.  That will ensure
1677  * proper operation if we do start using the ranges in the future, and we
1678  * can determine at that time whether it's worth adding extra complexity to
1679  * the forcewake handling to take steering into consideration.
1680  */
1681 static const struct intel_forcewake_range __xelpmp_fw_ranges[] = {
1682 	GEN_FW_RANGE(0x0, 0x115fff, 0), /* render GT range */
1683 	GEN_FW_RANGE(0x116000, 0x11ffff, FORCEWAKE_GSC), /*
1684 		0x116000 - 0x117fff: gsc
1685 		0x118000 - 0x119fff: reserved
1686 		0x11a000 - 0x11efff: gsc
1687 		0x11f000 - 0x11ffff: reserved */
1688 	GEN_FW_RANGE(0x120000, 0x1bffff, 0), /* non-GT range */
1689 	GEN_FW_RANGE(0x1c0000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX0), /*
1690 		0x1c0000 - 0x1c3dff: VD0
1691 		0x1c3e00 - 0x1c3eff: reserved
1692 		0x1c3f00 - 0x1c3fff: VD0
1693 		0x1c4000 - 0x1c7fff: reserved */
1694 	GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*
1695 		0x1c8000 - 0x1ca0ff: VE0
1696 		0x1ca100 - 0x1cbfff: reserved */
1697 	GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX0), /*
1698 		0x1cc000 - 0x1cdfff: VD0
1699 		0x1ce000 - 0x1cffff: reserved */
1700 	GEN_FW_RANGE(0x1d0000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX2), /*
1701 		0x1d0000 - 0x1d3dff: VD2
1702 		0x1d3e00 - 0x1d3eff: reserved
1703 		0x1d4000 - 0x1d7fff: VD2 */
1704 	GEN_FW_RANGE(0x1d8000, 0x1da0ff, FORCEWAKE_MEDIA_VEBOX1),
1705 	GEN_FW_RANGE(0x1da100, 0x380aff, 0), /*
1706 		0x1da100 - 0x23ffff: reserved
1707 		0x240000 - 0x37ffff: non-GT range
1708 		0x380000 - 0x380aff: reserved */
1709 	GEN_FW_RANGE(0x380b00, 0x380bff, FORCEWAKE_GT),
1710 	GEN_FW_RANGE(0x380c00, 0x380fff, 0),
1711 	GEN_FW_RANGE(0x381000, 0x38817f, FORCEWAKE_GT), /*
1712 		0x381000 - 0x381fff: gt
1713 		0x382000 - 0x383fff: reserved
1714 		0x384000 - 0x384aff: gt
1715 		0x384b00 - 0x3851ff: reserved
1716 		0x385200 - 0x3871ff: gt
1717 		0x387200 - 0x387fff: reserved
1718 		0x388000 - 0x38813f: gt
1719 		0x388140 - 0x38817f: reserved */
1720 	GEN_FW_RANGE(0x388180, 0x3882ff, 0), /*
1721 		0x388180 - 0x3881ff: always on
1722 		0x388200 - 0x3882ff: reserved */
1723 	GEN_FW_RANGE(0x388300, 0x38955f, FORCEWAKE_GT), /*
1724 		0x388300 - 0x38887f: gt
1725 		0x388880 - 0x388fff: reserved
1726 		0x389000 - 0x38947f: gt
1727 		0x389480 - 0x38955f: reserved */
1728 	GEN_FW_RANGE(0x389560, 0x389fff, 0), /*
1729 		0x389560 - 0x3895ff: always on
1730 		0x389600 - 0x389fff: reserved */
1731 	GEN_FW_RANGE(0x38a000, 0x38cfff, FORCEWAKE_GT), /*
1732 		0x38a000 - 0x38afff: gt
1733 		0x38b000 - 0x38bfff: reserved
1734 		0x38c000 - 0x38cfff: gt */
1735 	GEN_FW_RANGE(0x38d000, 0x38d11f, 0),
1736 	GEN_FW_RANGE(0x38d120, 0x391fff, FORCEWAKE_GT), /*
1737 		0x38d120 - 0x38dfff: gt
1738 		0x38e000 - 0x38efff: reserved
1739 		0x38f000 - 0x38ffff: gt
1740 		0x390000 - 0x391fff: reserved */
1741 	GEN_FW_RANGE(0x392000, 0x392fff, 0), /*
1742 		0x392000 - 0x3927ff: always on
1743 		0x392800 - 0x392fff: reserved */
1744 	GEN_FW_RANGE(0x393000, 0x3931ff, FORCEWAKE_GT),
1745 	GEN_FW_RANGE(0x393200, 0x39323f, FORCEWAKE_ALL), /* instance-based, see note above */
1746 	GEN_FW_RANGE(0x393240, 0x3933ff, FORCEWAKE_GT),
1747 	GEN_FW_RANGE(0x393400, 0x3934ff, FORCEWAKE_ALL), /* instance-based, see note above */
1748 	GEN_FW_RANGE(0x393500, 0x393c7f, 0), /*
1749 		0x393500 - 0x393bff: reserved
1750 		0x393c00 - 0x393c7f: always on */
1751 	GEN_FW_RANGE(0x393c80, 0x393dff, FORCEWAKE_GT),
1752 };
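
/*
 * A rough sketch of how the range tables above are consumed, using the
 * __xelpmp_fw_ranges entries as an example (the offset below is purely
 * illustrative):
 *
 *	offset = 0x116100;	// falls in 0x116000 - 0x117fff: gsc
 *	fw = __fwtable_reg_read_fw_domains(uncore, offset);
 *	// fw == FORCEWAKE_GSC, so the read path calls __force_wake_auto()
 *
 * Ranges tagged 0 ("always on") resolve to no forcewake domain at all, so
 * accesses to them skip the forcewake dance entirely.
 */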
1753 
1754 static void
1755 ilk_dummy_write(struct intel_uncore *uncore)
1756 {
1757 	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
1758 	 * the chip from rc6 before touching it for real. MI_MODE is masked,
1759 	 * hence harmless to write 0 into. */
1760 	__raw_uncore_write32(uncore, RING_MI_MODE(RENDER_RING_BASE), 0);
1761 }
1762 
1763 static void
1764 __unclaimed_reg_debug(struct intel_uncore *uncore,
1765 		      const i915_reg_t reg,
1766 		      const bool read)
1767 {
1768 	if (drm_WARN(&uncore->i915->drm,
1769 		     check_for_unclaimed_mmio(uncore),
1770 		     "Unclaimed %s register 0x%x\n",
1771 		     read ? "read from" : "write to",
1772 		     i915_mmio_reg_offset(reg)))
1773 		/* Only report the first N failures */
1774 		uncore->i915->params.mmio_debug--;
1775 }
1776 
1777 static void
1778 __unclaimed_previous_reg_debug(struct intel_uncore *uncore,
1779 			       const i915_reg_t reg,
1780 			       const bool read)
1781 {
1782 	if (check_for_unclaimed_mmio(uncore))
1783 		drm_dbg(&uncore->i915->drm,
1784 			"Unclaimed access detected before %s register 0x%x\n",
1785 			read ? "read from" : "write to",
1786 			i915_mmio_reg_offset(reg));
1787 }
1788 
1789 static inline bool __must_check
1790 unclaimed_reg_debug_header(struct intel_uncore *uncore,
1791 			   const i915_reg_t reg, const bool read)
1792 {
1793 	if (likely(!uncore->i915->params.mmio_debug) || !uncore->debug)
1794 		return false;
1795 
1796 	/* interrupts are disabled and re-enabled around uncore->lock usage */
1797 	lockdep_assert_held(&uncore->lock);
1798 
1799 	spin_lock(&uncore->debug->lock);
1800 	__unclaimed_previous_reg_debug(uncore, reg, read);
1801 
1802 	return true;
1803 }
1804 
1805 static inline void
1806 unclaimed_reg_debug_footer(struct intel_uncore *uncore,
1807 			   const i915_reg_t reg, const bool read)
1808 {
1809 	/* interrupts are disabled and re-enabled around uncore->lock usage */
1810 	lockdep_assert_held(&uncore->lock);
1811 
1812 	__unclaimed_reg_debug(uncore, reg, read);
1813 	spin_unlock(&uncore->debug->lock);
1814 }
1815 
1816 #define __vgpu_read(x) \
1817 static u##x \
1818 vgpu_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1819 	u##x val = __raw_uncore_read##x(uncore, reg); \
1820 	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1821 	return val; \
1822 }
1823 __vgpu_read(8)
1824 __vgpu_read(16)
1825 __vgpu_read(32)
1826 __vgpu_read(64)
1827 
1828 #define GEN2_READ_HEADER(x) \
1829 	u##x val = 0; \
1830 	assert_rpm_wakelock_held(uncore->rpm);
1831 
1832 #define GEN2_READ_FOOTER \
1833 	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1834 	return val
1835 
1836 #define __gen2_read(x) \
1837 static u##x \
1838 gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1839 	GEN2_READ_HEADER(x); \
1840 	val = __raw_uncore_read##x(uncore, reg); \
1841 	GEN2_READ_FOOTER; \
1842 }
1843 
1844 #define __gen5_read(x) \
1845 static u##x \
1846 gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
1847 	GEN2_READ_HEADER(x); \
1848 	ilk_dummy_write(uncore); \
1849 	val = __raw_uncore_read##x(uncore, reg); \
1850 	GEN2_READ_FOOTER; \
1851 }
1852 
1853 __gen5_read(8)
1854 __gen5_read(16)
1855 __gen5_read(32)
1856 __gen5_read(64)
1857 __gen2_read(8)
1858 __gen2_read(16)
1859 __gen2_read(32)
1860 __gen2_read(64)
1861 
1862 #undef __gen5_read
1863 #undef __gen2_read
1864 
1865 #undef GEN2_READ_FOOTER
1866 #undef GEN2_READ_HEADER
1867 
1868 #define GEN6_READ_HEADER(x) \
1869 	u32 offset = i915_mmio_reg_offset(reg); \
1870 	unsigned long irqflags; \
1871 	bool unclaimed_reg_debug; \
1872 	u##x val = 0; \
1873 	assert_rpm_wakelock_held(uncore->rpm); \
1874 	spin_lock_irqsave(&uncore->lock, irqflags); \
1875 	unclaimed_reg_debug = unclaimed_reg_debug_header(uncore, reg, true)
1876 
1877 #define GEN6_READ_FOOTER \
1878 	if (unclaimed_reg_debug) \
1879 		unclaimed_reg_debug_footer(uncore, reg, true);	\
1880 	spin_unlock_irqrestore(&uncore->lock, irqflags); \
1881 	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
1882 	return val
1883 
1884 static noinline void ___force_wake_auto(struct intel_uncore *uncore,
1885 					enum forcewake_domains fw_domains)
1886 {
1887 	struct intel_uncore_forcewake_domain *domain;
1888 	unsigned int tmp;
1889 
1890 	GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
1891 
1892 	for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
1893 		fw_domain_arm_timer(domain);
1894 
1895 	fw_domains_get(uncore, fw_domains);
1896 }
1897 
1898 static inline void __force_wake_auto(struct intel_uncore *uncore,
1899 				     enum forcewake_domains fw_domains)
1900 {
1901 	GEM_BUG_ON(!fw_domains);
1902 
1903 	/* Turn on all requested but inactive supported forcewake domains. */
1904 	fw_domains &= uncore->fw_domains;
1905 	fw_domains &= ~uncore->fw_domains_active;
1906 
1907 	if (fw_domains)
1908 		___force_wake_auto(uncore, fw_domains);
1909 }
1910 
1911 #define __gen_fwtable_read(x) \
1912 static u##x \
1913 fwtable_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) \
1914 { \
1915 	enum forcewake_domains fw_engine; \
1916 	GEN6_READ_HEADER(x); \
1917 	fw_engine = __fwtable_reg_read_fw_domains(uncore, offset); \
1918 	if (fw_engine) \
1919 		__force_wake_auto(uncore, fw_engine); \
1920 	val = __raw_uncore_read##x(uncore, reg); \
1921 	GEN6_READ_FOOTER; \
1922 }
1923 
1924 static enum forcewake_domains
1925 fwtable_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) {
1926 	return __fwtable_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg));
1927 }
1928 
1929 __gen_fwtable_read(8)
1930 __gen_fwtable_read(16)
1931 __gen_fwtable_read(32)
1932 __gen_fwtable_read(64)
1933 
1934 #undef __gen_fwtable_read
1935 #undef GEN6_READ_FOOTER
1936 #undef GEN6_READ_HEADER
1937 
1938 #define GEN2_WRITE_HEADER \
1939 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1940 	assert_rpm_wakelock_held(uncore->rpm); \
1941 
1942 #define GEN2_WRITE_FOOTER
1943 
1944 #define __gen2_write(x) \
1945 static void \
1946 gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1947 	GEN2_WRITE_HEADER; \
1948 	__raw_uncore_write##x(uncore, reg, val); \
1949 	GEN2_WRITE_FOOTER; \
1950 }
1951 
1952 #define __gen5_write(x) \
1953 static void \
1954 gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1955 	GEN2_WRITE_HEADER; \
1956 	ilk_dummy_write(uncore); \
1957 	__raw_uncore_write##x(uncore, reg, val); \
1958 	GEN2_WRITE_FOOTER; \
1959 }
1960 
1961 __gen5_write(8)
1962 __gen5_write(16)
1963 __gen5_write(32)
1964 __gen2_write(8)
1965 __gen2_write(16)
1966 __gen2_write(32)
1967 
1968 #undef __gen5_write
1969 #undef __gen2_write
1970 
1971 #undef GEN2_WRITE_FOOTER
1972 #undef GEN2_WRITE_HEADER
1973 
1974 #define GEN6_WRITE_HEADER \
1975 	u32 offset = i915_mmio_reg_offset(reg); \
1976 	unsigned long irqflags; \
1977 	bool unclaimed_reg_debug; \
1978 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
1979 	assert_rpm_wakelock_held(uncore->rpm); \
1980 	spin_lock_irqsave(&uncore->lock, irqflags); \
1981 	unclaimed_reg_debug = unclaimed_reg_debug_header(uncore, reg, false)
1982 
1983 #define GEN6_WRITE_FOOTER \
1984 	if (unclaimed_reg_debug) \
1985 		unclaimed_reg_debug_footer(uncore, reg, false); \
1986 	spin_unlock_irqrestore(&uncore->lock, irqflags)
1987 
1988 #define __gen6_write(x) \
1989 static void \
1990 gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
1991 	GEN6_WRITE_HEADER; \
1992 	if (NEEDS_FORCE_WAKE(offset)) \
1993 		__gen6_gt_wait_for_fifo(uncore); \
1994 	__raw_uncore_write##x(uncore, reg, val); \
1995 	GEN6_WRITE_FOOTER; \
1996 }
1997 __gen6_write(8)
1998 __gen6_write(16)
1999 __gen6_write(32)
2000 
2001 #define __gen_fwtable_write(x) \
2002 static void \
2003 fwtable_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
2004 	enum forcewake_domains fw_engine; \
2005 	GEN6_WRITE_HEADER; \
2006 	fw_engine = __fwtable_reg_write_fw_domains(uncore, offset); \
2007 	if (fw_engine) \
2008 		__force_wake_auto(uncore, fw_engine); \
2009 	__raw_uncore_write##x(uncore, reg, val); \
2010 	GEN6_WRITE_FOOTER; \
2011 }
2012 
2013 static enum forcewake_domains
2014 fwtable_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
2015 {
2016 	return __fwtable_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg));
2017 }
2018 
2019 __gen_fwtable_write(8)
2020 __gen_fwtable_write(16)
2021 __gen_fwtable_write(32)
2022 
2023 #undef __gen_fwtable_write
2024 #undef GEN6_WRITE_FOOTER
2025 #undef GEN6_WRITE_HEADER
2026 
2027 #define __vgpu_write(x) \
2028 static void \
2029 vgpu_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
2030 	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
2031 	__raw_uncore_write##x(uncore, reg, val); \
2032 }
2033 __vgpu_write(8)
2034 __vgpu_write(16)
2035 __vgpu_write(32)
2036 
2037 #define ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, x) \
2038 do { \
2039 	(uncore)->funcs.mmio_writeb = x##_write8; \
2040 	(uncore)->funcs.mmio_writew = x##_write16; \
2041 	(uncore)->funcs.mmio_writel = x##_write32; \
2042 } while (0)
2043 
2044 #define ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x) \
2045 do { \
2046 	(uncore)->funcs.mmio_readb = x##_read8; \
2047 	(uncore)->funcs.mmio_readw = x##_read16; \
2048 	(uncore)->funcs.mmio_readl = x##_read32; \
2049 	(uncore)->funcs.mmio_readq = x##_read64; \
2050 } while (0)
2051 
2052 #define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
2053 do { \
2054 	ASSIGN_RAW_WRITE_MMIO_VFUNCS((uncore), x); \
2055 	(uncore)->funcs.write_fw_domains = x##_reg_write_fw_domains; \
2056 } while (0)
2057 
2058 #define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
2059 do { \
2060 	ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, x); \
2061 	(uncore)->funcs.read_fw_domains = x##_reg_read_fw_domains; \
2062 } while (0)
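
/*
 * For example, ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable) wires up
 * fwtable_write8/16/32 as the raw write vfuncs and
 * fwtable_reg_write_fw_domains as the write-domain lookup hook; this is how
 * uncore_forcewake_init() below picks the forcewake-aware accessors.
 */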
2063 
2064 static int __fw_domain_init(struct intel_uncore *uncore,
2065 			    enum forcewake_domain_id domain_id,
2066 			    i915_reg_t reg_set,
2067 			    i915_reg_t reg_ack)
2068 {
2069 	struct intel_uncore_forcewake_domain *d;
2070 
2071 	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
2072 	GEM_BUG_ON(uncore->fw_domain[domain_id]);
2073 
2074 	if (i915_inject_probe_failure(uncore->i915))
2075 		return -ENOMEM;
2076 
2077 	d = kzalloc(sizeof(*d), GFP_KERNEL);
2078 	if (!d)
2079 		return -ENOMEM;
2080 
2081 	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set));
2082 	drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack));
2083 
2084 	d->uncore = uncore;
2085 	d->wake_count = 0;
2086 	d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set) + uncore->gsi_offset;
2087 	d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack) + uncore->gsi_offset;
2088 
2089 	d->id = domain_id;
2090 
2091 	BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
2092 	BUILD_BUG_ON(FORCEWAKE_GT != (1 << FW_DOMAIN_ID_GT));
2093 	BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
2094 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX0));
2095 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
2096 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
2097 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
2098 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX4 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX4));
2099 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX5 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX5));
2100 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX6 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX6));
2101 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX7 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX7));
2102 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
2103 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
2104 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX2));
2105 	BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX3));
2106 	BUILD_BUG_ON(FORCEWAKE_GSC != (1 << FW_DOMAIN_ID_GSC));
2107 
2108 	d->mask = BIT(domain_id);
2109 
2110 	hrtimer_setup(&d->timer, intel_uncore_fw_release_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2111 
2112 	uncore->fw_domains |= BIT(domain_id);
2113 
2114 	fw_domain_reset(d);
2115 
2116 	uncore->fw_domain[domain_id] = d;
2117 
2118 	return 0;
2119 }
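
/*
 * Note on the reg_set/reg_ack calculation above: gsi_offset folds in any
 * GSI remapping, so on a standalone media GT (where the GSI block is
 * translated to the 0x380000 offset, per the comment above
 * __xelpmp_fw_ranges) a forcewake register that nominally lives at, say,
 * 0xa188 is actually poked at 0x38a188 in the BAR. The 0xa188 value is
 * only illustrative.
 */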
2120 
2121 static void fw_domain_fini(struct intel_uncore *uncore,
2122 			   enum forcewake_domain_id domain_id)
2123 {
2124 	struct intel_uncore_forcewake_domain *d;
2125 
2126 	GEM_BUG_ON(domain_id >= FW_DOMAIN_ID_COUNT);
2127 
2128 	d = fetch_and_zero(&uncore->fw_domain[domain_id]);
2129 	if (!d)
2130 		return;
2131 
2132 	uncore->fw_domains &= ~BIT(domain_id);
2133 	drm_WARN_ON(&uncore->i915->drm, d->wake_count);
2134 	drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer));
2135 	kfree(d);
2136 }
2137 
2138 static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
2139 {
2140 	struct intel_uncore_forcewake_domain *d;
2141 	int tmp;
2142 
2143 	for_each_fw_domain(d, uncore, tmp)
2144 		fw_domain_fini(uncore, d->id);
2145 }
2146 
2147 static const struct intel_uncore_fw_get uncore_get_fallback = {
2148 	.force_wake_get = fw_domains_get_with_fallback
2149 };
2150 
2151 static const struct intel_uncore_fw_get uncore_get_normal = {
2152 	.force_wake_get = fw_domains_get_normal,
2153 };
2154 
2155 static const struct intel_uncore_fw_get uncore_get_thread_status = {
2156 	.force_wake_get = fw_domains_get_with_thread_status
2157 };
2158 
2159 static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
2160 {
2161 	struct drm_i915_private *i915 = uncore->i915;
2162 	int ret = 0;
2163 
2164 	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
2165 
2166 #define fw_domain_init(uncore__, id__, set__, ack__) \
2167 	(ret ?: (ret = __fw_domain_init((uncore__), (id__), (set__), (ack__))))
2168 
2169 	if (GRAPHICS_VER(i915) >= 11) {
2170 		intel_engine_mask_t emask;
2171 		int i;
2172 
2173 		/* we'll prune the domains of missing engines later */
2174 		emask = uncore->gt->info.engine_mask;
2175 
2176 		uncore->fw_get_funcs = &uncore_get_fallback;
2177 		if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
2178 			fw_domain_init(uncore, FW_DOMAIN_ID_GT,
2179 				       FORCEWAKE_GT_GEN9,
2180 				       FORCEWAKE_ACK_GT_MTL);
2181 		else
2182 			fw_domain_init(uncore, FW_DOMAIN_ID_GT,
2183 				       FORCEWAKE_GT_GEN9,
2184 				       FORCEWAKE_ACK_GT_GEN9);
2185 
2186 		if (RCS_MASK(uncore->gt) || CCS_MASK(uncore->gt))
2187 			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2188 				       FORCEWAKE_RENDER_GEN9,
2189 				       FORCEWAKE_ACK_RENDER_GEN9);
2190 
2191 		for (i = 0; i < I915_MAX_VCS; i++) {
2192 			if (!__HAS_ENGINE(emask, _VCS(i)))
2193 				continue;
2194 
2195 			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
2196 				       FORCEWAKE_MEDIA_VDBOX_GEN11(i),
2197 				       FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
2198 		}
2199 		for (i = 0; i < I915_MAX_VECS; i++) {
2200 			if (!__HAS_ENGINE(emask, _VECS(i)))
2201 				continue;
2202 
2203 			fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
2204 				       FORCEWAKE_MEDIA_VEBOX_GEN11(i),
2205 				       FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
2206 		}
2207 
2208 		if (uncore->gt->type == GT_MEDIA)
2209 			fw_domain_init(uncore, FW_DOMAIN_ID_GSC,
2210 				       FORCEWAKE_REQ_GSC, FORCEWAKE_ACK_GSC);
2211 	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
2212 		uncore->fw_get_funcs = &uncore_get_fallback;
2213 		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2214 			       FORCEWAKE_RENDER_GEN9,
2215 			       FORCEWAKE_ACK_RENDER_GEN9);
2216 		fw_domain_init(uncore, FW_DOMAIN_ID_GT,
2217 			       FORCEWAKE_GT_GEN9,
2218 			       FORCEWAKE_ACK_GT_GEN9);
2219 		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
2220 			       FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
2221 	} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
2222 		uncore->fw_get_funcs = &uncore_get_normal;
2223 		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2224 			       FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
2225 		fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
2226 			       FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
2227 	} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
2228 		uncore->fw_get_funcs = &uncore_get_thread_status;
2229 		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2230 			       FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
2231 	} else if (IS_IVYBRIDGE(i915)) {
2232 		u32 ecobus;
2233 
2234 		/* IVB configs may use multi-threaded forcewake */
2235 
2236 		/* A small trick here - if the bios hasn't configured
2237 		 * MT forcewake, and if the device is in RC6, then
2238 		 * force_wake_mt_get will not wake the device and the
2239 		 * ECOBUS read will return zero, which will be
2240 		 * (correctly) interpreted by the test below as MT
2241 		 * forcewake being disabled.
2242 		 */
2243 		uncore->fw_get_funcs = &uncore_get_thread_status;
2244 
2245 		/* We need to init first for ECOBUS access and then
2246 		 * determine later if we want to reinit, in case MT access is
2247 		 * not working. At this stage we don't know which flavour this
2248 		 * ivb is, so it is better to also reset the gen6 fw registers
2249 		 * before the ecobus check.
2250 		 */
2251 
2252 		__raw_uncore_write32(uncore, FORCEWAKE, 0);
2253 		__raw_posting_read(uncore, ECOBUS);
2254 
2255 		ret = __fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2256 				       FORCEWAKE_MT, FORCEWAKE_MT_ACK);
2257 		if (ret)
2258 			goto out;
2259 
2260 		spin_lock_irq(&uncore->lock);
2261 		fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
2262 		ecobus = __raw_uncore_read32(uncore, ECOBUS);
2263 		fw_domains_put(uncore, FORCEWAKE_RENDER);
2264 		spin_unlock_irq(&uncore->lock);
2265 
2266 		if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
2267 			drm_info(&i915->drm, "No MT forcewake available on Ivybridge, this can result in issues\n");
2268 			drm_info(&i915->drm, "when using vblank-synced partial screen updates.\n");
2269 			fw_domain_fini(uncore, FW_DOMAIN_ID_RENDER);
2270 			fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2271 				       FORCEWAKE, FORCEWAKE_ACK);
2272 		}
2273 	} else if (GRAPHICS_VER(i915) == 6) {
2274 		uncore->fw_get_funcs = &uncore_get_thread_status;
2275 		fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
2276 			       FORCEWAKE, FORCEWAKE_ACK);
2277 	}
2278 
2279 #undef fw_domain_init
2280 
2281 	/* All future platforms are expected to require complex power gating */
2282 	drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0);
2283 
2284 out:
2285 	if (ret)
2286 		intel_uncore_fw_domains_fini(uncore);
2287 
2288 	return ret;
2289 }
2290 
2291 #define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
2292 { \
2293 	(uncore)->fw_domains_table = \
2294 			(struct intel_forcewake_range *)(d); \
2295 	(uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
2296 }
2297 
2298 #define ASSIGN_SHADOW_TABLE(uncore, d) \
2299 { \
2300 	(uncore)->shadowed_reg_table = d; \
2301 	(uncore)->shadowed_reg_table_entries = ARRAY_SIZE((d)); \
2302 }
2303 
2304 static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
2305 					 unsigned long action, void *data)
2306 {
2307 	struct intel_uncore *uncore = container_of(nb,
2308 			struct intel_uncore, pmic_bus_access_nb);
2309 
2310 	switch (action) {
2311 	case MBI_PMIC_BUS_ACCESS_BEGIN:
2312 		/*
2313 		 * forcewake all now to make sure that we don't need to do a
2314 		 * forcewake later, which on systems where this notifier gets
2315 		 * called requires the punit to access the shared pmic i2c
2316 		 * bus, which will be busy after this notification, leading to:
2317 		 * "render: timed out waiting for forcewake ack request."
2318 		 * errors.
2319 		 *
2320 		 * The notifier is unregistered during intel_runtime_suspend(),
2321 		 * so it's ok to access the HW here without holding an RPM
2322 		 * wake reference -> disable wakeref asserts for the time of
2323 		 * the access.
2324 		 */
2325 		disable_rpm_wakeref_asserts(uncore->rpm);
2326 		intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
2327 		enable_rpm_wakeref_asserts(uncore->rpm);
2328 		break;
2329 	case MBI_PMIC_BUS_ACCESS_END:
2330 		intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
2331 		break;
2332 	}
2333 
2334 	return NOTIFY_OK;
2335 }
2336 
2337 static void uncore_unmap_mmio(struct drm_device *drm, void *regs)
2338 {
2339 	iounmap((void __iomem *)regs);
2340 }
2341 
2342 int intel_uncore_setup_mmio(struct intel_uncore *uncore, phys_addr_t phys_addr)
2343 {
2344 	struct drm_i915_private *i915 = uncore->i915;
2345 	int mmio_size;
2346 
2347 	/*
2348 	 * Before gen4, the registers and the GTT are behind different BARs.
2349 	 * However, from gen4 onwards, the registers and the GTT are shared
2350 	 * in the same BAR, so we want to restrict this ioremap from
2351 	 * clobbering the GTT, which we want to map with ioremap_wc instead.
2352 	 * Fortunately, the register BAR remains the same size for all the
2353 	 * earlier generations up to Ironlake.
2354 	 * For dgfx chips the register range is expanded to 4MB, and this larger
2355 	 * range is also used for integrated gpus beginning with Meteor Lake.
2356 	 */
2357 	if (IS_DGFX(i915) || GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70))
2358 		mmio_size = 4 * 1024 * 1024;
2359 	else if (GRAPHICS_VER(i915) >= 5)
2360 		mmio_size = 2 * 1024 * 1024;
2361 	else
2362 		mmio_size = 512 * 1024;
2363 
2364 	uncore->regs = ioremap(phys_addr, mmio_size);
2365 	if (uncore->regs == NULL) {
2366 		drm_err(&i915->drm, "failed to map registers\n");
2367 		return -EIO;
2368 	}
2369 
2370 	return drmm_add_action_or_reset(&i915->drm, uncore_unmap_mmio,
2371 					(void __force *)uncore->regs);
2372 }
2373 
2374 void intel_uncore_init_early(struct intel_uncore *uncore,
2375 			     struct intel_gt *gt)
2376 {
2377 	spin_lock_init(&uncore->lock);
2378 	uncore->i915 = gt->i915;
2379 	uncore->gt = gt;
2380 	uncore->rpm = &gt->i915->runtime_pm;
2381 }
2382 
2383 static void uncore_raw_init(struct intel_uncore *uncore)
2384 {
2385 	GEM_BUG_ON(intel_uncore_has_forcewake(uncore));
2386 
2387 	if (intel_vgpu_active(uncore->i915)) {
2388 		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, vgpu);
2389 		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, vgpu);
2390 	} else if (GRAPHICS_VER(uncore->i915) == 5) {
2391 		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen5);
2392 		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen5);
2393 	} else {
2394 		ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, gen2);
2395 		ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, gen2);
2396 	}
2397 }
2398 
2399 static int uncore_media_forcewake_init(struct intel_uncore *uncore)
2400 {
2401 	struct drm_i915_private *i915 = uncore->i915;
2402 
2403 	if (MEDIA_VER(i915) >= 13) {
2404 		ASSIGN_FW_DOMAINS_TABLE(uncore, __xelpmp_fw_ranges);
2405 		ASSIGN_SHADOW_TABLE(uncore, xelpmp_shadowed_regs);
2406 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2407 	} else {
2408 		MISSING_CASE(MEDIA_VER(i915));
2409 		return -ENODEV;
2410 	}
2411 
2412 	return 0;
2413 }
2414 
2415 static int uncore_forcewake_init(struct intel_uncore *uncore)
2416 {
2417 	struct drm_i915_private *i915 = uncore->i915;
2418 	int ret;
2419 
2420 	GEM_BUG_ON(!intel_uncore_has_forcewake(uncore));
2421 
2422 	ret = intel_uncore_fw_domains_init(uncore);
2423 	if (ret)
2424 		return ret;
2425 	forcewake_early_sanitize(uncore, 0);
2426 
2427 	ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
2428 
2429 	if (uncore->gt->type == GT_MEDIA)
2430 		return uncore_media_forcewake_init(uncore);
2431 
2432 	if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 70)) {
2433 		ASSIGN_FW_DOMAINS_TABLE(uncore, __mtl_fw_ranges);
2434 		ASSIGN_SHADOW_TABLE(uncore, mtl_shadowed_regs);
2435 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2436 	} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
2437 		ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges);
2438 		ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs);
2439 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2440 	} else if (GRAPHICS_VER(i915) >= 12) {
2441 		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
2442 		ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
2443 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2444 	} else if (GRAPHICS_VER(i915) == 11) {
2445 		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
2446 		ASSIGN_SHADOW_TABLE(uncore, gen11_shadowed_regs);
2447 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2448 	} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
2449 		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
2450 		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
2451 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2452 	} else if (IS_CHERRYVIEW(i915)) {
2453 		ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
2454 		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
2455 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2456 	} else if (GRAPHICS_VER(i915) == 8) {
2457 		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
2458 		ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
2459 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
2460 	} else if (IS_VALLEYVIEW(i915)) {
2461 		ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
2462 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
2463 	} else if (IS_GRAPHICS_VER(i915, 6, 7)) {
2464 		ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
2465 		ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
2466 	}
2467 
2468 	uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
2469 	iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
2470 
2471 	return 0;
2472 }
2473 
2474 static int sanity_check_mmio_access(struct intel_uncore *uncore)
2475 {
2476 	struct drm_i915_private *i915 = uncore->i915;
2477 
2478 	if (GRAPHICS_VER(i915) < 8)
2479 		return 0;
2480 
2481 	/*
2482 	 * Sanity-check that MMIO access to the device is working properly.  If
2483 	 * the CPU is unable to communicate with a PCI device, BAR reads will
2484 	 * return 0xFFFFFFFF.  Let's make sure the device isn't in this state
2485 	 * before we start trying to access registers.
2486 	 *
2487 	 * We use the primary GT's forcewake register as our guinea pig since
2488 	 * it's been around since HSW and it's a masked register so the upper
2489 	 * 16 bits can never read back as 1's if device access is operating
2490 	 * properly.
2491 	 *
2492 	 * If MMIO isn't working, we'll wait up to 2 seconds to see if it
2493 	 * recovers, then give up.
2494 	 */
2495 #define COND (__raw_uncore_read32(uncore, FORCEWAKE_MT) != ~0)
2496 	if (wait_for(COND, 2000) == -ETIMEDOUT) {
2497 		drm_err(&i915->drm, "Device is non-operational; MMIO access returns 0xFFFFFFFF!\n");
2498 		return -EIO;
2499 	}
2500 
2501 	return 0;
2502 }
2503 
2504 int intel_uncore_init_mmio(struct intel_uncore *uncore)
2505 {
2506 	struct drm_i915_private *i915 = uncore->i915;
2507 	struct intel_display *display = i915->display;
2508 	int ret;
2509 
2510 	ret = sanity_check_mmio_access(uncore);
2511 	if (ret)
2512 		return ret;
2513 
2514 	/*
2515 	 * The boot firmware initializes local memory and assesses its health.
2516 	 * If memory training fails, the punit will have been instructed to
2517 	 * keep the GT powered down; we won't be able to communicate with it
2518 	 * and we should not continue with driver initialization.
2519 	 */
2520 	if (IS_DGFX(i915) &&
2521 	    !(__raw_uncore_read32(uncore, GU_CNTL) & LMEM_INIT)) {
2522 		drm_err(&i915->drm, "LMEM not initialized by firmware\n");
2523 		return -ENODEV;
2524 	}
2525 
2526 	if (GRAPHICS_VER(i915) > 5 && !intel_vgpu_active(i915))
2527 		uncore->flags |= UNCORE_HAS_FORCEWAKE;
2528 
2529 	if (!intel_uncore_has_forcewake(uncore)) {
2530 		uncore_raw_init(uncore);
2531 	} else {
2532 		ret = uncore_forcewake_init(uncore);
2533 		if (ret)
2534 			return ret;
2535 	}
2536 
2537 	/* make sure fw funcs are set if and only if we have fw */
2538 	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->fw_get_funcs);
2539 	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
2540 	GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);
2541 
2542 	if (HAS_FPGA_DBG_UNCLAIMED(display))
2543 		uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;
2544 
2545 	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
2546 		uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;
2547 
2548 	if (IS_GRAPHICS_VER(i915, 6, 7))
2549 		uncore->flags |= UNCORE_HAS_FIFO;
2550 
2551 	/* clear out unclaimed reg detection bit */
2552 	if (intel_uncore_unclaimed_mmio(uncore))
2553 		drm_dbg(&i915->drm, "unclaimed mmio detected on uncore init, clearing\n");
2554 
2555 	return 0;
2556 }
2557 
2558 /*
2559  * We might have detected that some engines are fused off after we initialized
2560  * the forcewake domains. Prune them, to make sure they only reference existing
2561  * engines.
2562  */
2563 void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
2564 					  struct intel_gt *gt)
2565 {
2566 	enum forcewake_domains fw_domains = uncore->fw_domains;
2567 	enum forcewake_domain_id domain_id;
2568 	int i;
2569 
2570 	if (!intel_uncore_has_forcewake(uncore) || GRAPHICS_VER(uncore->i915) < 11)
2571 		return;
2572 
2573 	for (i = 0; i < I915_MAX_VCS; i++) {
2574 		domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
2575 
2576 		if (HAS_ENGINE(gt, _VCS(i)))
2577 			continue;
2578 
2579 		/*
2580 		 * Starting with XeHP, the power well for an even-numbered
2581 		 * VDBOX is also used for shared units within the
2582 		 * media slice such as SFC.  So even if the engine
2583 		 * itself is fused off, we still need to initialize
2584 		 * the forcewake domain if any of the other engines
2585 		 * in the same media slice are present.
2586 		 */
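		/*
		 * Concretely, for i == 0 on such a platform: even with VCS0
		 * fused off, the VDBOX0 domain is kept if either VCS1 or
		 * VECS0 is present, because the shared units they use live
		 * behind VDBOX0's power well.
		 */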
2587 		if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 55) && i % 2 == 0) {
2588 			if ((i + 1 < I915_MAX_VCS) && HAS_ENGINE(gt, _VCS(i + 1)))
2589 				continue;
2590 
2591 			if (HAS_ENGINE(gt, _VECS(i / 2)))
2592 				continue;
2593 		}
2594 
2595 		if (fw_domains & BIT(domain_id))
2596 			fw_domain_fini(uncore, domain_id);
2597 	}
2598 
2599 	for (i = 0; i < I915_MAX_VECS; i++) {
2600 		domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
2601 
2602 		if (HAS_ENGINE(gt, _VECS(i)))
2603 			continue;
2604 
2605 		if (fw_domains & BIT(domain_id))
2606 			fw_domain_fini(uncore, domain_id);
2607 	}
2608 
2609 	if ((fw_domains & BIT(FW_DOMAIN_ID_GSC)) && !HAS_ENGINE(gt, GSC0))
2610 		fw_domain_fini(uncore, FW_DOMAIN_ID_GSC);
2611 }
2612 
2613 /*
2614  * The driver-initiated FLR is the highest level of reset that we can trigger
2615  * from within the driver. It is different from the PCI FLR in that it doesn't
2616  * fully reset the SGUnit and doesn't modify the PCI config space and therefore
2617  * it doesn't require a re-enumeration of the PCI BARs. However, the
2618  * driver-initiated FLR does still cause a reset of both GT and display and a
2619  * memory wipe of local and stolen memory, so recovery would require a full HW
2620  * re-init and saving/restoring (or re-populating) the wiped memory. Since we
2621  * perform the FLR as the very last action before releasing access to the HW
2622  * during the driver release flow, we don't attempt recovery at all, because
2623  * if/when a new instance of i915 is bound to the device it will do a full
2624  * re-init anyway.
2625  */
2626 static void driver_initiated_flr(struct intel_uncore *uncore)
2627 {
2628 	struct drm_i915_private *i915 = uncore->i915;
2629 	unsigned int flr_timeout_ms;
2630 	int ret;
2631 
2632 	drm_dbg(&i915->drm, "Triggering Driver-FLR\n");
2633 
2634 	/*
2635 	 * The specification recommends a 3 second FLR reset timeout. To be
2636 	 * cautious, we will extend this to 9 seconds, three times the specified
2637 	 * timeout.
2638 	 */
2639 	flr_timeout_ms = 9000;
2640 
2641 	/*
2642 	 * Make sure any pending FLR requests have cleared by waiting for the
2643 	 * FLR trigger bit to go to zero. Also clear GU_DEBUG's DRIVERFLR_STATUS
2644 	 * to make sure it's not still set from a prior attempt (it's a
2645 	 * write-to-clear bit).
2646 	 * Note that we should never be in a situation where a previous attempt
2647 	 * is still pending (unless the HW is totally dead), but better to be
2648 	 * safe in case something unexpected happens.
2649 	 */
2650 	ret = intel_wait_for_register_fw(uncore, GU_CNTL, DRIVERFLR, 0, flr_timeout_ms, NULL);
2651 	if (ret) {
2652 		drm_err(&i915->drm,
2653 			"Failed to wait for Driver-FLR bit to clear! %d\n",
2654 			ret);
2655 		return;
2656 	}
2657 	intel_uncore_write_fw(uncore, GU_DEBUG, DRIVERFLR_STATUS);
2658 
2659 	/* Trigger the actual Driver-FLR */
2660 	intel_uncore_rmw_fw(uncore, GU_CNTL, 0, DRIVERFLR);
2661 
2662 	/* Wait for hardware teardown to complete */
2663 	ret = intel_wait_for_register_fw(uncore, GU_CNTL,
2664 					 DRIVERFLR, 0,
2665 					 flr_timeout_ms, NULL);
2666 	if (ret) {
2667 		drm_err(&i915->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
2668 		return;
2669 	}
2670 
2671 	/* Wait for hardware/firmware re-init to complete */
2672 	ret = intel_wait_for_register_fw(uncore, GU_DEBUG,
2673 					 DRIVERFLR_STATUS, DRIVERFLR_STATUS,
2674 					 flr_timeout_ms, NULL);
2675 	if (ret) {
2676 		drm_err(&i915->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
2677 		return;
2678 	}
2679 
2680 	/* Clear sticky completion status */
2681 	intel_uncore_write_fw(uncore, GU_DEBUG, DRIVERFLR_STATUS);
2682 }
2683 
2684 /* Called via drm-managed action */
2685 void intel_uncore_fini_mmio(struct drm_device *dev, void *data)
2686 {
2687 	struct intel_uncore *uncore = data;
2688 
2689 	if (intel_uncore_has_forcewake(uncore)) {
2690 		iosf_mbi_punit_acquire();
2691 		iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
2692 			&uncore->pmic_bus_access_nb);
2693 		intel_uncore_forcewake_reset(uncore);
2694 		intel_uncore_fw_domains_fini(uncore);
2695 		iosf_mbi_punit_release();
2696 	}
2697 
2698 	if (intel_uncore_needs_flr_on_fini(uncore))
2699 		driver_initiated_flr(uncore);
2700 }
2701 
2702 /**
2703  * __intel_wait_for_register_fw - wait until register matches expected state
2704  * @uncore: the struct intel_uncore
2705  * @reg: the register to read
2706  * @mask: mask to apply to register value
2707  * @value: expected value
2708  * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
2709  * @slow_timeout_ms: slow timeout in millisecond
2710  * @out_value: optional placeholder to hold register value
2711  *
2712  * This routine waits until the target register @reg contains the expected
2713  * @value after applying the @mask, i.e. it waits until ::
2714  *
2715  *     (intel_uncore_read_fw(uncore, reg) & mask) == value
2716  *
2717  * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
2718  * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us
2719  * must not be larger than 20,000 microseconds.
2720  *
2721  * Note that this routine assumes the caller holds forcewake asserted; it is
2722  * not suitable for very long waits. See intel_wait_for_register() if you
2723  * wish to wait without holding forcewake for the duration (i.e. you expect
2724  * the wait to be slow).
2725  *
2726  * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
2727  */
2728 int __intel_wait_for_register_fw(struct intel_uncore *uncore,
2729 				 i915_reg_t reg,
2730 				 u32 mask,
2731 				 u32 value,
2732 				 unsigned int fast_timeout_us,
2733 				 unsigned int slow_timeout_ms,
2734 				 u32 *out_value)
2735 {
2736 	u32 reg_value = 0;
2737 #define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
2738 	int ret;
2739 
2740 	/* Catch any overuse of this function */
2741 	might_sleep_if(slow_timeout_ms);
2742 	GEM_BUG_ON(fast_timeout_us > 20000);
2743 	GEM_BUG_ON(!fast_timeout_us && !slow_timeout_ms);
2744 
2745 	ret = -ETIMEDOUT;
2746 	if (fast_timeout_us && fast_timeout_us <= 20000)
2747 		ret = _wait_for_atomic(done, fast_timeout_us, 0);
2748 	if (ret && slow_timeout_ms)
2749 		ret = wait_for(done, slow_timeout_ms);
2750 
2751 	if (out_value)
2752 		*out_value = reg_value;
2753 
2754 	return ret;
2755 #undef done
2756 }
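
/*
 * A minimal usage sketch (the register, mask and timeouts below are purely
 * illustrative); with slow_timeout_ms == 0 this is usable in atomic context:
 *
 *	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
 *	err = __intel_wait_for_register_fw(uncore, GU_CNTL, DRIVERFLR, 0,
 *					   500, 0, NULL);
 *	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
 *
 * driver_initiated_flr() above relies on the intel_wait_for_register_fw()
 * wrapper for the same kind of wait with a millisecond timeout only.
 */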
2757 
2758 /**
2759  * __intel_wait_for_register - wait until register matches expected state
2760  * @uncore: the struct intel_uncore
2761  * @reg: the register to read
2762  * @mask: mask to apply to register value
2763  * @value: expected value
2764  * @fast_timeout_us: fast timeout in microsecond for atomic/tight wait
2765  * @slow_timeout_ms: slow timeout in millisecond
2766  * @out_value: optional placeholder to hold register value
2767  *
2768  * This routine waits until the target register @reg contains the expected
2769  * @value after applying the @mask, i.e. it waits until ::
2770  *
2771  *     (intel_uncore_read(uncore, reg) & mask) == value
2772  *
2773  * Otherwise, the wait will time out after @slow_timeout_ms milliseconds.
2774  *
2775  * Return: 0 if the register matches the desired condition, or -ETIMEDOUT.
2776  */
2777 int __intel_wait_for_register(struct intel_uncore *uncore,
2778 			      i915_reg_t reg,
2779 			      u32 mask,
2780 			      u32 value,
2781 			      unsigned int fast_timeout_us,
2782 			      unsigned int slow_timeout_ms,
2783 			      u32 *out_value)
2784 {
2785 	unsigned fw =
2786 		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
2787 	u32 reg_value;
2788 	int ret;
2789 
2790 	might_sleep_if(slow_timeout_ms);
2791 
2792 	spin_lock_irq(&uncore->lock);
2793 	intel_uncore_forcewake_get__locked(uncore, fw);
2794 
2795 	ret = __intel_wait_for_register_fw(uncore,
2796 					   reg, mask, value,
2797 					   fast_timeout_us, 0, &reg_value);
2798 
2799 	intel_uncore_forcewake_put__locked(uncore, fw);
2800 	spin_unlock_irq(&uncore->lock);
2801 
2802 	if (ret && slow_timeout_ms)
2803 		ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
2804 								       reg),
2805 				 (reg_value & mask) == value,
2806 				 slow_timeout_ms * 1000, 10, 1000);
2807 
2808 	/* just trace the final value */
2809 	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
2810 
2811 	if (out_value)
2812 		*out_value = reg_value;
2813 
2814 	return ret;
2815 }
2816 
2817 bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
2818 {
2819 	bool ret;
2820 
2821 	if (!uncore->debug)
2822 		return false;
2823 
2824 	spin_lock_irq(&uncore->debug->lock);
2825 	ret = check_for_unclaimed_mmio(uncore);
2826 	spin_unlock_irq(&uncore->debug->lock);
2827 
2828 	return ret;
2829 }
2830 
2831 bool
2832 intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
2833 {
2834 	bool ret = false;
2835 
2836 	if (drm_WARN_ON(&uncore->i915->drm, !uncore->debug))
2837 		return false;
2838 
2839 	spin_lock_irq(&uncore->debug->lock);
2840 
2841 	if (unlikely(uncore->debug->unclaimed_mmio_check <= 0))
2842 		goto out;
2843 
2844 	if (unlikely(check_for_unclaimed_mmio(uncore))) {
2845 		if (!uncore->i915->params.mmio_debug) {
2846 			drm_dbg(&uncore->i915->drm,
2847 				"Unclaimed register detected, "
2848 				"enabling oneshot unclaimed register reporting. "
2849 				"Please use i915.mmio_debug=N for more information.\n");
2850 			uncore->i915->params.mmio_debug++;
2851 		}
2852 		uncore->debug->unclaimed_mmio_check--;
2853 		ret = true;
2854 	}
2855 
2856 out:
2857 	spin_unlock_irq(&uncore->debug->lock);
2858 
2859 	return ret;
2860 }
2861 
2862 /**
2863  * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
2864  * 				    a register
2865  * @uncore: pointer to struct intel_uncore
2866  * @reg: register in question
2867  * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
2868  *
2869  * Returns a set of forcewake domains required to be taken with, for example,
2870  * intel_uncore_forcewake_get() for the specified register to be accessible in the
2871  * specified mode (read, write or read/write) with raw mmio accessors.
2872  *
2873  * NOTE: On Gen6 and Gen7, the write forcewake domain (FORCEWAKE_RENDER) requires the
2874  * callers to do FIFO management on their own or risk losing writes.
2875  */
2876 enum forcewake_domains
2877 intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
2878 			       i915_reg_t reg, unsigned int op)
2879 {
2880 	enum forcewake_domains fw_domains = 0;
2881 
2882 	drm_WARN_ON(&uncore->i915->drm, !op);
2883 
2884 	if (!intel_uncore_has_forcewake(uncore))
2885 		return 0;
2886 
2887 	if (op & FW_REG_READ)
2888 		fw_domains = uncore->funcs.read_fw_domains(uncore, reg);
2889 
2890 	if (op & FW_REG_WRITE)
2891 		fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);
2892 
2893 	drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains);
2894 
2895 	return fw_domains;
2896 }
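
/*
 * A typical usage sketch for the helper above, mirroring what
 * __intel_wait_for_register() does internally (the register and bit are
 * purely illustrative):
 *
 *	fw = intel_uncore_forcewake_for_reg(uncore, reg,
 *					    FW_REG_READ | FW_REG_WRITE);
 *	spin_lock_irq(&uncore->lock);
 *	intel_uncore_forcewake_get__locked(uncore, fw);
 *	val = intel_uncore_read_fw(uncore, reg);
 *	intel_uncore_write_fw(uncore, reg, val | bit);
 *	intel_uncore_forcewake_put__locked(uncore, fw);
 *	spin_unlock_irq(&uncore->lock);
 */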
2897 
2898 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
2899 #include "selftests/mock_uncore.c"
2900 #include "selftests/intel_uncore.c"
2901 #endif
2902