/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/kref.h>
#include <linux/string_helpers.h>

#include <drm/drm_print.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "lib_sw_fence.h"

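/*
 * Test fixture: an i915_active wrapped with a kref so that each selftest
 * can keep the allocation alive independently of the tracker itself, plus
 * a flag recording whether the retirement callback has fired.
 */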
struct live_active {
	struct i915_active base;
	struct kref ref;
	bool retired;
};

static void __live_get(struct live_active *active)
{
	kref_get(&active->ref);
}

static void __live_free(struct live_active *active)
{
	i915_active_fini(&active->base);
	kfree(active);
}

static void __live_release(struct kref *ref)
{
	struct live_active *active = container_of(ref, typeof(*active), ref);

	__live_free(active);
}

static void __live_put(struct live_active *active)
{
	kref_put(&active->ref, __live_release);
}

static int __live_active(struct i915_active *base)
{
	struct live_active *active = container_of(base, typeof(*active), base);

	__live_get(active);
	return 0;
}

static void __live_retire(struct i915_active *base)
{
	struct live_active *active = container_of(base, typeof(*active), base);

	active->retired = true;
	__live_put(active);
}

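/*
 * kref_init() gives the caller one reference; __live_active() takes a
 * second as the tracker becomes busy, which __live_retire() drops again
 * once it idles, so the allocation cannot vanish while callbacks are
 * still pending.
 */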
static struct live_active *__live_alloc(struct drm_i915_private *i915)
{
	struct live_active *active;

	active = kzalloc(sizeof(*active), GFP_KERNEL);
	if (!active)
		return NULL;

	kref_init(&active->ref);
	i915_active_init(&active->base, __live_active, __live_retire, 0);

	return active;
}

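/*
 * Build a busy tracker: submit one kernel request per uabi engine, with
 * every submission plugged behind a single heap fence so that nothing can
 * run (or retire) prematurely, and track each request in the i915_active.
 * The fence is only committed, at the out label, after checking that the
 * tracker is still busy and accounting for every request.
 */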
static struct live_active *
__live_active_setup(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	struct i915_sw_fence *submit;
	struct live_active *active;
	unsigned int count = 0;
	int err = 0;

	active = __live_alloc(i915);
	if (!active)
		return ERR_PTR(-ENOMEM);

	submit = heap_fence_create(GFP_KERNEL);
	if (!submit) {
		kfree(active);
		return ERR_PTR(-ENOMEM);
	}

	err = i915_active_acquire(&active->base);
	if (err)
		goto out;

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;

		rq = intel_engine_create_kernel_request(engine);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			break;
		}

		err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
						       submit,
						       GFP_KERNEL);
		if (err >= 0)
			err = i915_active_add_request(&active->base, rq);
		i915_request_add(rq);
		if (err) {
			pr_err("Failed to track active ref!\n");
			break;
		}

		count++;
	}

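	/*
	 * Drop our explicit acquire before checking: the requests are
	 * still plugged behind the submit fence, so the tracker must
	 * still be busy and must not have retired yet.
	 */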
	i915_active_release(&active->base);
	if (READ_ONCE(active->retired) && count) {
		pr_err("i915_active retired before submission!\n");
		err = -EINVAL;
	}
	if (atomic_read(&active->base.count) != count) {
		pr_err("i915_active not tracking all requests, found %d, expected %d\n",
		       atomic_read(&active->base.count), count);
		err = -EINVAL;
	}

out:
	i915_sw_fence_commit(submit);
	heap_fence_put(submit);
	if (err) {
		__live_put(active);
		active = ERR_PTR(err);
	}

	return active;
}

static int live_active_wait(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct live_active *active;
	int err = 0;

	/* Check that we get a callback when requests retire upon waiting */

	active = __live_active_setup(i915);
	if (IS_ERR(active))
		return PTR_ERR(active);

	__i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
	if (!READ_ONCE(active->retired)) {
		struct drm_printer p = drm_err_printer(&i915->drm, __func__);

		drm_printf(&p, "i915_active not retired after waiting!\n");
		i915_active_print(&active->base, &p);

		err = -EINVAL;
	}

	__live_put(active);

	if (igt_flush_test(i915))
		err = -EIO;

	return err;
}

static int live_active_retire(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct live_active *active;
	int err = 0;

	/* Check that we get a callback when requests are indirectly retired */

	active = __live_active_setup(i915);
	if (IS_ERR(active))
		return PTR_ERR(active);

	/* waits for & retires all requests */
	if (igt_flush_test(i915))
		err = -EIO;

	if (!READ_ONCE(active->retired)) {
		struct drm_printer p = drm_err_printer(&i915->drm, __func__);

		drm_printf(&p, "i915_active not retired after flushing!\n");
		i915_active_print(&active->base, &p);

		err = -EINVAL;
	}

	__live_put(active);

	return err;
}

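/*
 * Barriers are preallocated tracker nodes attached to each engine's
 * kernel context; in effect they complete when the engine next idles,
 * so live_active_barrier() below exercises retirement without issuing
 * any requests of its own.
 */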
static int live_active_barrier(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct live_active *active;
	int err = 0;

	/* Check that we get a callback when barriers retire upon waiting */

	active = __live_alloc(i915);
	if (!active)
		return -ENOMEM;

	err = i915_active_acquire(&active->base);
	if (err)
		goto out;

	for_each_uabi_engine(engine, i915) {
		err = i915_active_acquire_preallocate_barrier(&active->base,
							      engine);
		if (err)
			break;

		i915_active_acquire_barrier(&active->base);
	}

	i915_active_release(&active->base);
	if (err)
		goto out;

	__i915_active_wait(&active->base, TASK_UNINTERRUPTIBLE);
	if (!READ_ONCE(active->retired)) {
		pr_err("i915_active not retired after flushing barriers!\n");
		err = -EINVAL;
	}

out:
	__live_put(active);

	if (igt_flush_test(i915))
		err = -EIO;

	return err;
}

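/*
 * Entry point for the live selftest harness. If the GPU is already
 * wedged no requests can be submitted, so the tests are skipped rather
 * than reported as failures.
 */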
int i915_active_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_active_wait),
		SUBTEST(live_active_retire),
		SUBTEST(live_active_barrier),
	};

	if (intel_gt_is_wedged(to_gt(i915)))
		return 0;

	return i915_subtests(tests, i915);
}

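/*
 * A node only counts as a barrier while its fence slot still holds the
 * barrier marker; re-checking after the read barrier catches a node
 * that a concurrent add_active_barriers() has just bound to a real
 * request, in which case it is treated as an ordinary fence node.
 */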
static struct intel_engine_cs *node_to_barrier(struct active_node *it)
{
	struct intel_engine_cs *engine;

	if (!is_barrier(&it->base))
		return NULL;

	engine = __barrier_to_engine(it);
	smp_rmb(); /* serialise with add_active_barriers */
	if (!is_barrier(&it->base))
		return NULL;

	return engine;
}

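/*
 * Debug dump of a tracker's state, used by the selftests above on
 * failure. The per-timeline nodes are only walked if the tracker can be
 * pinned busy for the duration, keeping the rbtree stable.
 */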
void i915_active_print(struct i915_active *ref, struct drm_printer *m)
{
	drm_printf(m, "active %ps:%ps\n", ref->active, ref->retire);
	drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count));
	drm_printf(m, "\tpreallocated barriers? %s\n",
		   str_yes_no(!llist_empty(&ref->preallocated_barriers)));

	if (i915_active_acquire_if_busy(ref)) {
		struct active_node *it, *n;

		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
			struct intel_engine_cs *engine;

			engine = node_to_barrier(it);
			if (engine) {
				drm_printf(m, "\tbarrier: %s\n", engine->name);
				continue;
			}

			if (i915_active_fence_isset(&it->base)) {
				drm_printf(m,
					   "\ttimeline: %llx\n", it->timeline);
				continue;
			}
		}

		i915_active_release(ref);
	}
}

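/*
 * Taking and immediately releasing the lock guarantees that any critical
 * section which held it beforehand has completed.
 */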
static void spin_unlock_wait(spinlock_t *lock)
{
	spin_lock_irq(lock);
	spin_unlock_irq(lock);
}

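/*
 * Steal the fence from a tracker slot and manually unlink its callback,
 * performing the bookkeeping the signal callback would otherwise have
 * done. The GEM_BUG_ON documents the precondition: this is only legal
 * once the fence has already been signaled.
 */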
static void active_flush(struct i915_active *ref,
			 struct i915_active_fence *active)
{
	struct dma_fence *fence;

	fence = xchg(__active_fence_slot(active), NULL);
	if (!fence)
		return;

	spin_lock_irq(fence->lock);
	__list_del_entry(&active->cb.node);
	spin_unlock_irq(fence->lock); /* serialise with fence->cb_list */
	atomic_dec(&ref->count);

	GEM_BUG_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
}

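/*
 * Flush all outstanding callbacks on a tracker so that it is safe to
 * destroy: detach any remaining fence callbacks, wait out the tree_lock
 * critical section used by retirement, and flush the retirement worker
 * in case it ran from a thread instead.
 */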
void i915_active_unlock_wait(struct i915_active *ref)
{
	if (i915_active_acquire_if_busy(ref)) {
		struct active_node *it, *n;

		/* Wait for all active callbacks */
		rcu_read_lock();
		active_flush(ref, &ref->excl);
		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node)
			active_flush(ref, &it->base);
		rcu_read_unlock();

		i915_active_release(ref);
	}

	/* And wait for the retire callback */
	spin_unlock_wait(&ref->tree_lock);

	/* ... which may have been on a thread instead */
	flush_work(&ref->work);
}