/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_gt_pm.h"
#include "intel_pm.h"
#include "intel_wakeref.h"

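/* Notify listeners on the GT PM notifier chain of a park/unpark event. */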
static void pm_notify(struct drm_i915_private *i915, int state)
{
	blocking_notifier_call_chain(&i915->gt.pm_notifications, state, i915);
}

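/*
 * Called by the wakeref machinery when the GT transitions from idle to
 * busy: take the GT_IRQ power domain (see the workaround comment below),
 * re-enable powersaving/RPS, restart PMU sampling and hangcheck, and let
 * listeners know the GT has been unparked.
 */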
static int intel_gt_unpark(struct intel_wakeref *wf)
{
	struct drm_i915_private *i915 =
		container_of(wf, typeof(*i915), gt.wakeref);

	GEM_TRACE("\n");

	/*
	 * It seems that the DMC likes to transition between the DC states a lot
	 * when there are no connected displays (no active power domains) during
	 * command submission.
	 *
	 * This activity has a negative impact on the performance of the chip,
	 * with huge latencies observed in the interrupt handler and elsewhere.
	 *
	 * Work around it by grabbing a GT IRQ power domain whilst there is any
	 * GT activity, preventing any DC state transitions.
	 */
	i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
	GEM_BUG_ON(!i915->gt.awake);

	intel_enable_gt_powersave(i915);

	i915_update_gfx_val(i915);
	if (INTEL_GEN(i915) >= 6)
		gen6_rps_busy(i915);

	i915_pmu_gt_unparked(i915);

	i915_queue_hangcheck(i915);

	pm_notify(i915, INTEL_GT_UNPARK);

	return 0;
}

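/*
 * Acquire a GT wakeref; the first reference unparks the GT via
 * intel_gt_unpark().
 */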
void intel_gt_pm_get(struct drm_i915_private *i915)
{
	intel_wakeref_get(i915, &i915->gt.wakeref, intel_gt_unpark);
}

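/*
 * Counterpart to intel_gt_unpark(), run when the last wakeref is released:
 * notify listeners, stop PMU sampling, drop RPS to idle and release the
 * GT_IRQ power domain taken at unpark.
 */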
static int intel_gt_park(struct intel_wakeref *wf)
{
	struct drm_i915_private *i915 =
		container_of(wf, typeof(*i915), gt.wakeref);
	intel_wakeref_t wakeref = fetch_and_zero(&i915->gt.awake);

	GEM_TRACE("\n");

	pm_notify(i915, INTEL_GT_PARK);

	i915_pmu_gt_parked(i915);
	if (INTEL_GEN(i915) >= 6)
		gen6_rps_idle(i915);

	GEM_BUG_ON(!wakeref);
	intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);

	return 0;
}

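/*
 * Release a GT wakeref; dropping the last reference parks the GT via
 * intel_gt_park().
 */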
void intel_gt_pm_put(struct drm_i915_private *i915)
{
	intel_wakeref_put(i915, &i915->gt.wakeref, intel_gt_park);
}
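
/*
 * Usage sketch (illustrative only, not lifted from an actual caller):
 * GT activity is bracketed by a get/put pair so that the device stays
 * unparked for the duration of the work, e.g.
 *
 *	intel_gt_pm_get(i915);
 *	... build and submit requests ...
 *	intel_gt_pm_put(i915);
 *
 * The first get unparks the GT and the final put parks it again.
 */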
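/* One-time setup of the GT wakeref and the PM notifier chain. */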
void intel_gt_pm_init(struct drm_i915_private *i915)
{
	intel_wakeref_init(&i915->gt.wakeref);
	BLOCKING_INIT_NOTIFIER_HEAD(&i915->gt.pm_notifications);
}

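/*
 * Try to reset all engines; skipped (returning false) when a full GPU
 * reset would clobber the display.
 */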
static bool reset_engines(struct drm_i915_private *i915)
{
	if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
		return false;

	return intel_gpu_reset(i915, ALL_ENGINES) == 0;
}

/**
 * intel_gt_sanitize: called after the GPU has lost power
 * @i915: the i915 device
 * @force: ignore a failed reset and sanitize engine state anyway
 *
 * Anytime we reset the GPU, either with an explicit GPU reset or through a
 * PCI power cycle, the GPU loses state and we must reset our state tracking
 * to match. Note that calling intel_gt_sanitize() if the GPU has not
 * been reset results in much confusion!
 */
void intel_gt_sanitize(struct drm_i915_private *i915, bool force)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	GEM_TRACE("\n");

	if (!reset_engines(i915) && !force)
		return;

	for_each_engine(engine, i915, id)
		intel_engine_reset(engine, false);
}

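/*
 * Restore the pinned kernel and preempt contexts after resume; see the
 * comment below for why only these contexts need fixing up here.
 */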
void intel_gt_resume(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * After resume, we may need to poke into the pinned kernel
	 * contexts to paper over any damage caused by the sudden suspend.
	 * Only the kernel contexts should remain pinned over suspend,
	 * allowing us to fixup the user contexts on their first pin.
	 */
	for_each_engine(engine, i915, id) {
		struct intel_context *ce;

		ce = engine->kernel_context;
		if (ce)
			ce->ops->reset(ce);

		ce = engine->preempt_context;
		if (ce)
			ce->ops->reset(ce);
	}
}