/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/wait_bit.h>

#include <drm/drm_print.h>

#include "intel_runtime_pm.h"
#include "intel_wakeref.h"
#include "i915_drv.h"

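/*
 * First-get slow path (reached from intel_wakeref_get() in
 * intel_wakeref.h when the count may be zero): take a runtime-pm
 * wakeref up front, then serialise against a concurrent last-put with
 * wf->mutex before running the owner's ops->get() callback. On
 * failure, the stashed wakeref is reclaimed, idle waiters are woken
 * and the error propagated; the runtime-pm reference is dropped
 * outside the mutex.
 */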
int __intel_wakeref_get_first(struct intel_wakeref *wf)
{
	intel_wakeref_t wakeref;
	int ret = 0;

	wakeref = intel_runtime_pm_get(&wf->i915->runtime_pm);
	/*
	 * Treat get/put as different subclasses, as we may need to run
	 * the put callback from under the shrinker and do not want to
	 * cross-contaminate that callback with any extra work performed
	 * upon acquiring the wakeref.
	 */
	mutex_lock_nested(&wf->mutex, SINGLE_DEPTH_NESTING);

	if (!atomic_read(&wf->count)) {
		INTEL_WAKEREF_BUG_ON(wf->wakeref);
		wf->wakeref = wakeref;
		wakeref = NULL;

		ret = wf->ops->get(wf);
		if (ret) {
			wakeref = xchg(&wf->wakeref, NULL);
			wake_up_var(&wf->wakeref);
			goto unlock;
		}

		smp_mb__before_atomic(); /* release wf->count */
	}

	atomic_inc(&wf->count);
	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);

unlock:
	mutex_unlock(&wf->mutex);
	if (unlikely(wakeref))
		intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref);

	return ret;
}

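/*
 * Last-put slow path, called with wf->mutex held: if this really is
 * the final reference and ops->put() completes the release, steal the
 * stored wakeref under the mutex, wake intel_wakeref_wait_for_idle()
 * waiters, and drop the runtime-pm reference only after unlocking.
 */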
static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
{
	intel_wakeref_t wakeref = NULL;

	INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
	if (unlikely(!atomic_dec_and_test(&wf->count)))
		goto unlock;

	/* ops->put() must reschedule its own release on error/deferral */
	if (likely(!wf->ops->put(wf))) {
		INTEL_WAKEREF_BUG_ON(!wf->wakeref);
		wakeref = xchg(&wf->wakeref, NULL);
		wake_up_var(&wf->wakeref);
	}

unlock:
	mutex_unlock(&wf->mutex);
	if (wakeref)
		intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref);
}

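/*
 * Entry point for the final put. If the caller may not sleep
 * (INTEL_WAKEREF_PUT_ASYNC) or loses the race for wf->mutex, the
 * release is punted to the delayed worker instead, using the delay in
 * jiffies encoded in the INTEL_WAKEREF_PUT_DELAY field of @flags.
 */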
void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
{
	INTEL_WAKEREF_BUG_ON(delayed_work_pending(&wf->work));

	/* Assume we are not in process context and so cannot sleep. */
	if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) {
		mod_delayed_work(wf->i915->unordered_wq, &wf->work,
				 FIELD_GET(INTEL_WAKEREF_PUT_DELAY, flags));
		return;
	}

	____intel_wakeref_put_last(wf);
}

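/*
 * Deferred last-put from the workqueue: re-check the count first so
 * that a get() racing with the deferral simply absorbs our reference
 * (atomic_add_unless() only decrements while other references remain),
 * otherwise take the mutex and perform the real release.
 */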
static void __intel_wakeref_put_work(struct work_struct *wrk)
{
	struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work.work);

	if (atomic_add_unless(&wf->count, -1, 1))
		return;

	mutex_lock(&wf->mutex);
	____intel_wakeref_put_last(wf);
}

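/*
 * Initialise @wf before first use. Callers typically go through the
 * intel_wakeref_init() macro in intel_wakeref.h, which supplies the
 * static lock classes. A minimal sketch of an owner (the foo_* names
 * below are illustrative only, not taken from this file):
 *
 *	static int foo_get(struct intel_wakeref *wf) { return 0; }
 *	static int foo_put(struct intel_wakeref *wf) { return 0; }
 *	static const struct intel_wakeref_ops foo_ops = {
 *		.get = foo_get,
 *		.put = foo_put,
 *	};
 *
 *	intel_wakeref_init(&foo->wakeref, i915, &foo_ops, "foo");
 *
 * ops->get() is invoked under wf->mutex with a runtime-pm wakeref
 * already held; ops->put() must either complete the release or
 * reschedule it (see ____intel_wakeref_put_last() above).
 */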
void __intel_wakeref_init(struct intel_wakeref *wf,
			  struct drm_i915_private *i915,
			  const struct intel_wakeref_ops *ops,
			  struct intel_wakeref_lockclass *key,
			  const char *name)
{
	wf->i915 = i915;
	wf->ops = ops;

	__mutex_init(&wf->mutex, "wakeref.mutex", &key->mutex);
	atomic_set(&wf->count, 0);
	wf->wakeref = NULL;

	INIT_DELAYED_WORK(&wf->work, __intel_wakeref_put_work);
	lockdep_init_map(&wf->work.work.lockdep_map,
			 "wakeref.work", &key->work, 0);

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF)
	if (!wf->debug.class)
		ref_tracker_dir_init(&wf->debug, INTEL_REFTRACK_DEAD_COUNT, "intel_wakeref");
#endif
}

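/*
 * Sleep until the wakeref is idle, i.e. until the final put has run
 * and cleared wf->wakeref; wake_up_var(&wf->wakeref) in the release
 * paths above pairs with this wait. We then take and release wf->mutex
 * (via intel_wakeref_unlock_wait()) so that a concurrent last-put has
 * fully finished before we report idle. Returns an error if
 * interrupted by a fatal signal.
 */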
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
{
	int err;

	might_sleep();

	err = wait_var_event_killable(&wf->wakeref,
				      !intel_wakeref_is_active(wf));
	if (err)
		return err;

	intel_wakeref_unlock_wait(wf);
	return 0;
}

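/*
 * "Auto" wakerefs keep the device awake for a grace period after the
 * last use instead of pinning it indefinitely. The timer callback
 * drops the timer's reference and, if that was the last one, hands the
 * stored runtime-pm wakeref back.
 */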
static void wakeref_auto_timeout(struct timer_list *t)
{
	struct intel_wakeref_auto *wf = timer_container_of(wf, t, timer);
	intel_wakeref_t wakeref;
	unsigned long flags;

	if (!refcount_dec_and_lock_irqsave(&wf->count, &wf->lock, &flags))
		return;

	wakeref = xchg(&wf->wakeref, NULL);
	spin_unlock_irqrestore(&wf->lock, flags);

	intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref);
}

void intel_wakeref_auto_init(struct intel_wakeref_auto *wf,
			     struct drm_i915_private *i915)
{
	spin_lock_init(&wf->lock);
	timer_setup(&wf->timer, wakeref_auto_timeout, 0);
	refcount_set(&wf->count, 0);
	wf->wakeref = NULL;
	wf->i915 = i915;
}

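/*
 * Arm, extend or (with timeout == 0) cancel the auto timeout. For a
 * nonzero timeout the caller must already hold a runtime-pm wakeref.
 * An illustrative use (hypothetical names, a minimal sketch rather
 * than a real caller):
 *
 *	intel_wakeref_auto(&foo->userfault_wakeref, msecs_to_jiffies(250));
 *
 * which keeps the device awake for ~250ms after the current access,
 * amortising the runtime-pm get/put cost over bursts of activity.
 */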
void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout)
{
	unsigned long flags;

	if (!timeout) {
		if (timer_delete_sync(&wf->timer))
			wakeref_auto_timeout(&wf->timer);
		return;
	}

	/* We may only extend an already active wakeref */
	assert_rpm_wakelock_held(&wf->i915->runtime_pm);

	if (!refcount_inc_not_zero(&wf->count)) {
		spin_lock_irqsave(&wf->lock, flags);
		if (!refcount_inc_not_zero(&wf->count)) {
			INTEL_WAKEREF_BUG_ON(wf->wakeref);
			wf->wakeref =
				intel_runtime_pm_get_if_in_use(&wf->i915->runtime_pm);
			refcount_set(&wf->count, 1);
		}
		spin_unlock_irqrestore(&wf->lock, flags);
	}

	/*
	 * If we extend a pending timer, we will only get a single timer
	 * callback and so need to cancel the local inc by running the
	 * elided callback to keep the wf->count balanced.
	 */
	if (mod_timer(&wf->timer, jiffies + timeout))
		wakeref_auto_timeout(&wf->timer);
}

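/* Cancel any pending timeout, which must release the final wakeref. */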
void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
{
	intel_wakeref_auto(wf, 0);
	INTEL_WAKEREF_BUG_ON(wf->wakeref);
}

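/*
 * Render a ref_tracker leak report through a drm_printer. The report
 * is written into a single page (allocated with GFP_NOWAIT so the dump
 * cannot block) and emitted line by line, as printk truncates overly
 * long strings; anything that did not fit in the page is summarised as
 * a dropped-bytes count instead.
 */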
void intel_ref_tracker_show(struct ref_tracker_dir *dir,
			    struct drm_printer *p)
{
	const size_t buf_size = PAGE_SIZE;
	char *buf, *sb, *se;
	size_t count;

	buf = kmalloc(buf_size, GFP_NOWAIT);
	if (!buf)
		return;

	count = ref_tracker_dir_snprint(dir, buf, buf_size);
	if (!count)
		goto free;
	/* printk does not like big buffers, so we split it */
	for (sb = buf; *sb; sb = se + 1) {
		se = strchrnul(sb, '\n');
		drm_printf(p, "%.*s", (int)(se - sb + 1), sb);
		if (!*se)
			break;
	}
	if (count >= buf_size)
		drm_printf(p, "\n...dropped %zd extra bytes of leak report.\n",
			   count + 1 - buf_size);
free:
	kfree(buf);
}