/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_RUNTIME_PM_H__
#define __INTEL_RUNTIME_PM_H__

#include <linux/pm_runtime.h>
#include <linux/types.h>

#include "intel_wakeref.h"

struct device;
struct drm_i915_private;
struct drm_printer;

/*
 * This struct helps track the state needed for runtime PM, which puts the
 * device in PCI D3 state. Notice that when this happens, nothing on the
 * graphics device works, not even register access, so we don't get interrupts
 * or anything else.
 *
 * Every piece of our code that needs to actually touch the hardware needs to
 * either call intel_runtime_pm_get or call intel_display_power_get with the
 * appropriate power domain.
 *
 * Our driver uses the autosuspend delay feature, which means we'll only really
 * suspend if we stay at a zero refcount for a certain amount of time. The
 * default value is currently very conservative (see intel_runtime_pm_enable),
 * but it can be changed with the standard runtime PM files from sysfs.
 *
 * The irqs_disabled variable becomes true exactly after we disable the IRQs
 * and goes back to false exactly before we reenable the IRQs. We use this
 * variable to check if someone is trying to enable/disable IRQs while they're
 * supposed to be disabled. This shouldn't happen, and we'll print error
 * messages if it does.
 *
 * For more, read Documentation/power/runtime_pm.rst.
 */
struct intel_runtime_pm {
	atomic_t wakeref_count;
	struct device *kdev; /* points to i915->drm.dev */
	bool available;
	bool no_wakeref_tracking;

	/*
	 * Protects access to the lmem userfault list.
	 * Outside of the runtime suspend path, access to @lmem_userfault_list
	 * always requires first grabbing the runtime pm wakeref, to ensure we
	 * can't race against runtime suspend. Once we have that, we also need
	 * to grab @lmem_userfault_lock, at which point we have exclusive
	 * access.
	 * The runtime suspend path is special since it doesn't really hold any
	 * locks, but instead has exclusive access by virtue of all other
	 * accesses requiring the runtime pm wakeref to be held.
	 */
	spinlock_t lmem_userfault_lock;

	/*
	 * List of userfaulted GEM objects which need to have their mmap
	 * mappings released in the runtime suspend path.
	 */
	struct list_head lmem_userfault_list;

	/* Manual runtime pm autosuspend delay for user GGTT/lmem mmaps */
	struct intel_wakeref_auto userfault_wakeref;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	/*
	 * To aid detection of wakeref leaks and general misuse, we
	 * track all wakeref holders. With manual markup (i.e. returning
	 * a cookie to each rpm_get caller which they then supply to their
	 * paired rpm_put) we can remove the corresponding pairs and keep
	 * the array trimmed to active wakerefs.
	 */
	struct ref_tracker_dir debug;
#endif
};

#define BITS_PER_WAKEREF	\
	BITS_PER_TYPE(typeof_member(struct intel_runtime_pm, wakeref_count))
#define INTEL_RPM_WAKELOCK_SHIFT	(BITS_PER_WAKEREF / 2)
#define INTEL_RPM_WAKELOCK_BIAS		(1 << INTEL_RPM_WAKELOCK_SHIFT)
#define INTEL_RPM_RAW_WAKEREF_MASK	(INTEL_RPM_WAKELOCK_BIAS - 1)

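/*
 * The single atomic wakeref_count encodes two counts: the low
 * INTEL_RPM_WAKELOCK_SHIFT bits carry the raw-wakeref count and the high
 * bits carry the wakelock count. As a worked example, with a 32-bit
 * atomic_t the shift is 16 and the bias is 0x10000, so a value of
 * 0x00020003 decodes to 2 wakelocks and 3 raw wakerefs via the two
 * helpers below.
 */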
static inline int
intel_rpm_raw_wakeref_count(int wakeref_count)
{
	return wakeref_count & INTEL_RPM_RAW_WAKEREF_MASK;
}

static inline int
intel_rpm_wakelock_count(int wakeref_count)
{
	return wakeref_count >> INTEL_RPM_WAKELOCK_SHIFT;
}

static inline bool
intel_runtime_pm_suspended(struct intel_runtime_pm *rpm)
{
	return pm_runtime_suspended(rpm->kdev);
}

static inline void
assert_rpm_device_not_suspended(struct intel_runtime_pm *rpm)
{
	WARN_ONCE(intel_runtime_pm_suspended(rpm),
		  "Device suspended during HW access\n");
}

static inline void
__assert_rpm_raw_wakeref_held(struct intel_runtime_pm *rpm, int wakeref_count)
{
	assert_rpm_device_not_suspended(rpm);
	WARN_ONCE(!intel_rpm_raw_wakeref_count(wakeref_count),
		  "RPM raw-wakeref not held\n");
}

static inline void
__assert_rpm_wakelock_held(struct intel_runtime_pm *rpm, int wakeref_count)
{
	__assert_rpm_raw_wakeref_held(rpm, wakeref_count);
	WARN_ONCE(!intel_rpm_wakelock_count(wakeref_count),
		  "RPM wakelock ref not held during HW access\n");
}

static inline void
assert_rpm_raw_wakeref_held(struct intel_runtime_pm *rpm)
{
	__assert_rpm_raw_wakeref_held(rpm, atomic_read(&rpm->wakeref_count));
}

static inline void
assert_rpm_wakelock_held(struct intel_runtime_pm *rpm)
{
	__assert_rpm_wakelock_held(rpm, atomic_read(&rpm->wakeref_count));
}
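
/*
 * Example (sketch only; the accessor below is hypothetical and not part of
 * this header): low-level HW accessors typically assert the wakelock on
 * entry so that a missing intel_runtime_pm_get() is caught immediately:
 *
 *	static u32 example_mmio_read(struct intel_runtime_pm *rpm,
 *				     void __iomem *regs, u32 offset)
 *	{
 *		assert_rpm_wakelock_held(rpm);
 *		return readl(regs + offset);
 *	}
 */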

/**
 * disable_rpm_wakeref_asserts - disable the RPM assert checks
 * @rpm: the intel_runtime_pm structure
 *
 * This function disables the asserts that check whether we hold an RPM
 * wakelock reference, while keeping the device-not-suspended checks enabled.
 * It's meant to be used only in special circumstances where our rule about
 * the wakelock refcount wrt. the device power state doesn't hold. According
 * to this rule, at any point where we access the HW or want to keep the HW in
 * an active state we must hold an RPM wakelock reference acquired via one of
 * the intel_runtime_pm_get() helpers. Currently there are a few special spots
 * where this rule doesn't hold: the IRQ and suspend/resume handlers, the
 * forcewake release timer, and the GPU RPS and hangcheck work items. All
 * other users should avoid using this function.
 *
 * Any calls to this function must have a symmetric call to
 * enable_rpm_wakeref_asserts().
 */
static inline void
disable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm)
{
	atomic_add(INTEL_RPM_WAKELOCK_BIAS + 1,
		   &rpm->wakeref_count);
}

/**
 * enable_rpm_wakeref_asserts - re-enable the RPM assert checks
 * @rpm: the intel_runtime_pm structure
 *
 * This function re-enables the RPM assert checks after disabling them with
 * disable_rpm_wakeref_asserts. It's meant to be used only in special
 * circumstances; otherwise its use should be avoided.
 *
 * Any calls to this function must have a symmetric call to
 * disable_rpm_wakeref_asserts().
 */
static inline void
enable_rpm_wakeref_asserts(struct intel_runtime_pm *rpm)
{
	atomic_sub(INTEL_RPM_WAKELOCK_BIAS + 1,
		   &rpm->wakeref_count);
}
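
/*
 * Example (sketch; the function below is hypothetical): the two helpers
 * above must always be used in matched pairs around the special-case code,
 * e.g. in a suspend/resume or IRQ path that legitimately runs without a
 * wakelock reference:
 *
 *	static void example_special_path(struct intel_runtime_pm *rpm)
 *	{
 *		disable_rpm_wakeref_asserts(rpm);
 *
 *		...touch the HW without tripping the wakelock asserts...
 *
 *		enable_rpm_wakeref_asserts(rpm);
 *	}
 */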

void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm);
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_disable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm);
void intel_runtime_pm_driver_last_release(struct intel_runtime_pm *rpm);

intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm);

#define with_intel_runtime_pm(rpm, wf) \
	for ((wf) = intel_runtime_pm_get(rpm); (wf); \
	     intel_runtime_pm_put((rpm), (wf)), (wf) = NULL)

#define with_intel_runtime_pm_if_in_use(rpm, wf) \
	for ((wf) = intel_runtime_pm_get_if_in_use(rpm); (wf); \
	     intel_runtime_pm_put((rpm), (wf)), (wf) = NULL)

#define with_intel_runtime_pm_if_active(rpm, wf) \
	for ((wf) = intel_runtime_pm_get_if_active(rpm); (wf); \
	     intel_runtime_pm_put((rpm), (wf)), (wf) = NULL)
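
/*
 * Example (sketch; the function and register below are hypothetical): the
 * scoped forms above keep the get and its put visually paired and bound to
 * a single block:
 *
 *	static u32 example_read_reg(struct drm_i915_private *i915)
 *	{
 *		intel_wakeref_t wakeref;
 *		u32 val = 0;
 *
 *		with_intel_runtime_pm(&i915->runtime_pm, wakeref)
 *			val = intel_uncore_read(&i915->uncore, EXAMPLE_REG);
 *
 *		return val;
 *	}
 */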

void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm);
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref);
#else
static inline void
intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	intel_runtime_pm_put_unchecked(rpm);
}
#endif
void intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref);
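
/*
 * Example (sketch; the function is hypothetical): when the scoped
 * with_intel_runtime_pm() form doesn't fit, the cookie returned by
 * intel_runtime_pm_get() must be kept and handed back to the matching
 * intel_runtime_pm_put():
 *
 *	static void example_long_lived_access(struct drm_i915_private *i915)
 *	{
 *		intel_wakeref_t wakeref;
 *
 *		wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 *
 *		...keep the HW awake across the long-running operation...
 *
 *		intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 *	}
 */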

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
				    struct drm_printer *p);
#else
static inline void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
						  struct drm_printer *p)
{
}
#endif

#endif /* __INTEL_RUNTIME_PM_H__ */