/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <linux/pm_runtime.h>

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_trace.h"

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains there's a sizeable amount of indirection required. This file provides
 * generic functions to the driver for grabbing and releasing references for
 * abstract power domains. It then maps those to the actual power wells
 * present for a given platform.
 */
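
/*
 * A minimal usage sketch of the reference API below (illustrative only; the
 * guarded hardware access and the way the rpm pointer is obtained are
 * assumptions, not code from this file):
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 *	... access the hardware, e.g. MMIO reads/writes ...
 *	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 *
 * Every get must be balanced by a put that is handed back the wakeref cookie,
 * so the debug tracking below can pair acquisitions with releases.
 */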

static struct drm_i915_private *rpm_to_i915(struct intel_runtime_pm *rpm)
{
	return container_of(rpm, struct drm_i915_private, runtime_pm);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

#include <linux/sort.h>

#define STACKDEPTH 8

static noinline depot_stack_handle_t __save_depot_stack(void)
{
	unsigned long entries[STACKDEPTH];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
	return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
}

static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	spin_lock_init(&rpm->debug.lock);
	stack_depot_init();
}

static noinline depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	depot_stack_handle_t stack, *stacks;
	unsigned long flags;

	if (rpm->no_wakeref_tracking)
		return -1;

	stack = __save_depot_stack();
	if (!stack)
		return -1;

	spin_lock_irqsave(&rpm->debug.lock, flags);

	if (!rpm->debug.count)
		rpm->debug.last_acquire = stack;

	stacks = krealloc(rpm->debug.owners,
			  (rpm->debug.count + 1) * sizeof(*stacks),
			  GFP_NOWAIT | __GFP_NOWARN);
	if (stacks) {
		stacks[rpm->debug.count++] = stack;
		rpm->debug.owners = stacks;
	} else {
		stack = -1;
	}

	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	return stack;
}

static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
					     depot_stack_handle_t stack)
{
	struct drm_i915_private *i915 = container_of(rpm,
						     struct drm_i915_private,
						     runtime_pm);
	unsigned long flags, n;
	bool found = false;

	if (unlikely(stack == -1))
		return;

	spin_lock_irqsave(&rpm->debug.lock, flags);
	for (n = rpm->debug.count; n--; ) {
		if (rpm->debug.owners[n] == stack) {
			memmove(rpm->debug.owners + n,
				rpm->debug.owners + n + 1,
				(--rpm->debug.count - n) * sizeof(stack));
			found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	if (drm_WARN(&i915->drm, !found,
		     "Unmatched wakeref (tracking %lu), count %u\n",
		     rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
		char *buf;

		buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
		if (!buf)
			return;

		stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
		DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);

		stack = READ_ONCE(rpm->debug.last_release);
		if (stack) {
			stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
			DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
		}

		kfree(buf);
	}
}

static int cmphandle(const void *_a, const void *_b)
{
	const depot_stack_handle_t * const a = _a, * const b = _b;

	if (*a < *b)
		return -1;
	else if (*a > *b)
		return 1;
	else
		return 0;
}

static void
__print_intel_runtime_pm_wakeref(struct drm_printer *p,
				 const struct intel_runtime_pm_debug *dbg)
{
	unsigned long i;
	char *buf;

	buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
	if (!buf)
		return;

	if (dbg->last_acquire) {
		stack_depot_snprint(dbg->last_acquire, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref last acquired:\n%s", buf);
	}

	if (dbg->last_release) {
		stack_depot_snprint(dbg->last_release, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref last released:\n%s", buf);
	}

	drm_printf(p, "Wakeref count: %lu\n", dbg->count);

	sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);

	for (i = 0; i < dbg->count; i++) {
		depot_stack_handle_t stack = dbg->owners[i];
		unsigned long rep;

		rep = 1;
		while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
			rep++, i++;
		stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
		drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
	}

	kfree(buf);
}

static noinline void
__untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
		       struct intel_runtime_pm_debug *saved)
{
	*saved = *debug;

	debug->owners = NULL;
	debug->count = 0;
	debug->last_release = __save_depot_stack();
}

static void
dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
{
	if (debug->count) {
		struct drm_printer p = drm_debug_printer("i915");

		__print_intel_runtime_pm_wakeref(&p, debug);
	}

	kfree(debug->owners);
}

static noinline void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
	struct intel_runtime_pm_debug dbg = {};
	unsigned long flags;

	if (!atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
					 &rpm->debug.lock,
					 flags))
		return;

	__untrack_all_wakerefs(&rpm->debug, &dbg);
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	dump_and_free_wakeref_tracking(&dbg);
}

static noinline void
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
	struct intel_runtime_pm_debug dbg = {};
	unsigned long flags;

	spin_lock_irqsave(&rpm->debug.lock, flags);
	__untrack_all_wakerefs(&rpm->debug, &dbg);
	spin_unlock_irqrestore(&rpm->debug.lock, flags);

	dump_and_free_wakeref_tracking(&dbg);
}

void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
				    struct drm_printer *p)
{
	struct intel_runtime_pm_debug dbg = {};

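	/*
	 * Snapshot the debug state without holding the lock across an
	 * allocation: guess a size, copy under the lock if the current count
	 * fits, otherwise grow the owners buffer outside the lock and retry.
	 */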
	do {
		unsigned long alloc = dbg.count;
		depot_stack_handle_t *s;

		spin_lock_irq(&rpm->debug.lock);
		dbg.count = rpm->debug.count;
		if (dbg.count <= alloc) {
			memcpy(dbg.owners,
			       rpm->debug.owners,
			       dbg.count * sizeof(*s));
		}
		dbg.last_acquire = rpm->debug.last_acquire;
		dbg.last_release = rpm->debug.last_release;
		spin_unlock_irq(&rpm->debug.lock);
		if (dbg.count <= alloc)
			break;

		s = krealloc(dbg.owners,
			     dbg.count * sizeof(*s),
			     GFP_NOWAIT | __GFP_NOWARN);
		if (!s)
			goto out;

		dbg.owners = s;
	} while (1);

	__print_intel_runtime_pm_wakeref(p, &dbg);

out:
	kfree(dbg.owners);
}

#else

static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
}

static depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
	return -1;
}

static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
					     intel_wakeref_t wref)
{
}

static void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
	atomic_dec(&rpm->wakeref_count);
}

static void
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
}

#endif

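/*
 * Sketch of the counter packing used by the helpers below: wakeref_count
 * holds both a raw reference count (each get adds 1) and a wakelock count
 * offset by INTEL_RPM_WAKELOCK_BIAS (a power-of-two bias defined outside this
 * file). Acquiring a wakelock therefore adds 1 + INTEL_RPM_WAKELOCK_BIAS,
 * while releasing one subtracts only the bias and leaves the final decrement
 * to __intel_wakeref_dec_and_check_tracking(). The
 * intel_rpm_raw_wakeref_count()/intel_rpm_wakelock_count() helpers used in
 * intel_runtime_pm_driver_release() split the packed value back apart.
 */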
static void
intel_runtime_pm_acquire(struct intel_runtime_pm *rpm, bool wakelock)
{
	if (wakelock) {
		atomic_add(1 + INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
		assert_rpm_wakelock_held(rpm);
	} else {
		atomic_inc(&rpm->wakeref_count);
		assert_rpm_raw_wakeref_held(rpm);
	}
}

static void
intel_runtime_pm_release(struct intel_runtime_pm *rpm, int wakelock)
{
	if (wakelock) {
		assert_rpm_wakelock_held(rpm);
		atomic_sub(INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
	} else {
		assert_rpm_raw_wakeref_held(rpm);
	}

	__intel_wakeref_dec_and_check_tracking(rpm);
}

static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
					      bool wakelock)
{
	struct drm_i915_private *i915 = rpm_to_i915(rpm);
	int ret;

	ret = pm_runtime_get_sync(rpm->kdev);
	drm_WARN_ONCE(&i915->drm, ret < 0,
		      "pm_runtime_get_sync() failed: %d\n", ret);

	intel_runtime_pm_acquire(rpm, wakelock);

	return track_intel_runtime_pm_wakeref(rpm);
}

/**
 * intel_runtime_pm_get_raw - grab a raw runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for
 * asynchronous PM management from display code) and ensures that it is powered
 * up. Raw references are not considered during wakelock assert checks.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put_raw() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put_raw(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */
intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get(rpm, false);
}
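
/*
 * Illustrative pairing only (not taken from a real caller): a raw reference
 * keeps the device awake without satisfying the wakelock asserts, e.g.
 *
 *	intel_wakeref_t wakeref = intel_runtime_pm_get_raw(rpm);
 *	if (wakeref) {
 *		... touch state that must stay powered ...
 *		intel_runtime_pm_put_raw(rpm, wakeref);
 *	}
 */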

/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get(rpm, true);
}
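
/*
 * Note: many callers wrap the get/put pair in the with_intel_runtime_pm()
 * helper from intel_runtime_pm.h, which is roughly (a sketch, not the
 * verbatim definition):
 *
 *	for ((wf) = intel_runtime_pm_get(rpm); (wf);
 *	     intel_runtime_pm_put((rpm), (wf)), (wf) = 0)
 *
 * so a single statement block runs with the device powered and the reference
 * is dropped when the block completes normally.
 */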

/**
 * __intel_runtime_pm_get_if_active - grab a runtime pm reference if device is active
 * @rpm: the intel_runtime_pm structure
 * @ignore_usecount: get a ref even if dev->power.usage_count is 0
 *
 * This function grabs a device-level runtime pm reference if the device is
 * already active and ensures that it is powered up. It is illegal to try to
 * access the HW if intel_runtime_pm_get_if_active() reports failure.
 *
 * If @ignore_usecount is true, a reference will be acquired even if there is no
 * user requiring the device to be powered up (dev->power.usage_count == 0).
 * If the function returns false in this case then it's guaranteed that the
 * device's runtime suspend hook has been called already or that it will be
 * called (and hence it's also guaranteed that the device's runtime resume
 * hook will be called eventually).
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
 */
static intel_wakeref_t __intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm,
							bool ignore_usecount)
{
	if (IS_ENABLED(CONFIG_PM)) {
		/*
		 * If runtime PM is disabled by the RPM core and we get an
		 * -EINVAL return value, we are not supposed to call this
		 * function, since the power state is undefined. This applies
		 * atm to the late/early system suspend/resume handlers.
		 */
		if (pm_runtime_get_if_active(rpm->kdev, ignore_usecount) <= 0)
			return 0;
	}

	intel_runtime_pm_acquire(rpm, true);

	return track_intel_runtime_pm_wakeref(rpm);
}

intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get_if_active(rpm, false);
}

intel_wakeref_t intel_runtime_pm_get_if_active(struct intel_runtime_pm *rpm)
{
	return __intel_runtime_pm_get_if_active(rpm, true);
}
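
/*
 * A sketch of the conditional pattern (the surrounding caller is
 * hypothetical): only touch the hardware when it is already awake, and skip
 * the work rather than forcing a resume:
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_runtime_pm_get_if_in_use(rpm);
 *	if (!wakeref)
 *		return;
 *	... flush or sample hardware state ...
 *	intel_runtime_pm_put(rpm, wakeref);
 */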

/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on.  Therefore it is only valid to call this function from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm)
{
	assert_rpm_wakelock_held(rpm);
	pm_runtime_get_noresume(rpm->kdev);

	intel_runtime_pm_acquire(rpm, true);

	return track_intel_runtime_pm_wakeref(rpm);
}
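
/*
 * Sketch only (the work item and its wakeref field are assumptions): a caller
 * that already holds a wakeref takes a noresume reference for deferred work
 * and hands it off to be released from the work item once it is done with the
 * hardware:
 *
 *	work->wakeref = intel_runtime_pm_get_noresume(rpm);
 *	queue_work(wq, &work->base);
 *	...
 *	intel_runtime_pm_put(rpm, work->wakeref);
 */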

static void __intel_runtime_pm_put(struct intel_runtime_pm *rpm,
				   intel_wakeref_t wref,
				   bool wakelock)
{
	struct device *kdev = rpm->kdev;

	untrack_intel_runtime_pm_wakeref(rpm, wref);

	intel_runtime_pm_release(rpm, wakelock);

	pm_runtime_mark_last_busy(kdev);
	pm_runtime_put_autosuspend(kdev);
}

/**
 * intel_runtime_pm_put_raw - release a raw runtime pm reference
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get_raw() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void
intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	__intel_runtime_pm_put(rpm, wref, false);
}

/**
 * intel_runtime_pm_put_unchecked - release an unchecked runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 *
 * This function exists only for historical reasons and should be avoided in
 * new code, as the correctness of its use cannot be checked. Always use
 * intel_runtime_pm_put() instead.
 */
void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm)
{
	__intel_runtime_pm_put(rpm, -1, true);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
	__intel_runtime_pm_put(rpm, wref, true);
}
#endif

/**
 * intel_runtime_pm_enable - enable runtime pm
 * @rpm: the intel_runtime_pm structure
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does not currently enable runtime pm for the
 * subordinate display power domains. That is done by
 * intel_power_domains_enable().
 */
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 = rpm_to_i915(rpm);
	struct device *kdev = rpm->kdev;

	/*
	 * Disable the system suspend direct complete optimization, which can
	 * leave the device suspended, skipping the driver's suspend handlers,
	 * if the device was already runtime suspended. This is needed due to
	 * the difference in our runtime and system suspend sequence and
	 * because the HDA driver may require us to enable the audio power
	 * domain during system suspend.
	 */
	dev_pm_set_driver_flags(kdev, DPM_FLAG_NO_DIRECT_COMPLETE);

	pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
	pm_runtime_mark_last_busy(kdev);

	/*
	 * Take a permanent reference to disable the RPM functionality and drop
	 * it only when unloading the driver. Use the low level get/put helpers,
	 * so the driver's own RPM reference tracking asserts also work on
	 * platforms without RPM support.
	 */
	if (!rpm->available) {
		int ret;

		pm_runtime_dont_use_autosuspend(kdev);
		ret = pm_runtime_get_sync(kdev);
		drm_WARN(&i915->drm, ret < 0,
			 "pm_runtime_get_sync() failed: %d\n", ret);
	} else {
		pm_runtime_use_autosuspend(kdev);
	}

	/*
	 * FIXME: Temporary hammer to keep autosuspend disabled on lmem
	 * supported platforms. As per PCIe spec section 5.3.1.4.1, all iomem
	 * read/write requests over a PCIe function are unsupported while the
	 * PCIe endpoint function is in D3. Keep i915 autosuspend control 'on'
	 * until all known issues with lmem access in D3 are fixed.
	 */
	if (!IS_DGFX(i915))
		pm_runtime_allow(kdev);

	/*
	 * The core calls the driver load handler with an RPM reference held.
	 * We drop that here and will reacquire it during unloading in
	 * intel_power_domains_fini().
	 */
	pm_runtime_put_autosuspend(kdev);
}

void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 = rpm_to_i915(rpm);
	struct device *kdev = rpm->kdev;

	/* Transfer rpm ownership back to core */
	drm_WARN(&i915->drm, pm_runtime_get_sync(kdev) < 0,
		 "Failed to pass rpm ownership back to core\n");

	pm_runtime_dont_use_autosuspend(kdev);

	if (!rpm->available)
		pm_runtime_put(kdev);
}

void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 = rpm_to_i915(rpm);
	int count = atomic_read(&rpm->wakeref_count);

	intel_wakeref_auto_fini(&rpm->userfault_wakeref);

	drm_WARN(&i915->drm, count,
		 "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
		 intel_rpm_raw_wakeref_count(count),
		 intel_rpm_wakelock_count(count));

	untrack_all_intel_runtime_pm_wakerefs(rpm);
}

void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
{
	struct drm_i915_private *i915 = rpm_to_i915(rpm);
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	struct device *kdev = &pdev->dev;

	rpm->kdev = kdev;
	rpm->available = HAS_RUNTIME_PM(i915);
	atomic_set(&rpm->wakeref_count, 0);

	init_intel_runtime_pm_wakeref(rpm);
	INIT_LIST_HEAD(&rpm->lmem_userfault_list);
	spin_lock_init(&rpm->lmem_userfault_lock);
	intel_wakeref_auto_init(&rpm->userfault_wakeref, i915);
}
658