xref: /linux/drivers/gpu/drm/i915/gt/intel_context.c (revision 6fdcba32711044c35c0e1b094cbd8f3f0b4472c9)
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_ring.h"

static struct i915_global_context {
	struct i915_global base;
	struct kmem_cache *slab_ce;
} global;

static struct intel_context *intel_context_alloc(void)
{
	return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}

void intel_context_free(struct intel_context *ce)
{
	kmem_cache_free(global.slab_ce, ce);
}

struct intel_context *
intel_context_create(struct i915_gem_context *ctx,
		     struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_alloc();
	if (!ce)
		return ERR_PTR(-ENOMEM);

	intel_context_init(ce, ctx, engine);
	return ce;
}

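/*
 * Note on pinning (added commentary, not from the original file): at this
 * revision, intel_context_pin() in intel_context.h is expected to take a
 * fast path via atomic_inc_not_zero(&ce->pin_count) and only fall back to
 * __intel_context_do_pin() below for the first pin. A minimal, illustrative
 * lifecycle sketch (error handling trimmed, names as in this file):
 *
 *	ce = intel_context_create(ctx, engine);
 *	if (IS_ERR(ce))
 *		return PTR_ERR(ce);
 *
 *	err = intel_context_pin(ce);	// first pin allocates + pins backend state
 *	...				// build and submit requests
 *	intel_context_unpin(ce);
 *	intel_context_put(ce);		// drop the creation reference
 */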
int __intel_context_do_pin(struct intel_context *ce)
{
	int err;

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	if (likely(!atomic_read(&ce->pin_count))) {
		intel_wakeref_t wakeref;

		if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
			err = ce->ops->alloc(ce);
			if (unlikely(err))
				goto err;

			__set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
		}

		err = 0;
		with_intel_runtime_pm(ce->engine->uncore->rpm, wakeref)
			err = ce->ops->pin(ce);
		if (err)
			goto err;

		GEM_TRACE("%s context:%llx pin ring:{head:%04x, tail:%04x}\n",
			  ce->engine->name, ce->timeline->fence_context,
			  ce->ring->head, ce->ring->tail);

		i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */

		smp_mb__before_atomic(); /* flush pin before it is visible */
	}

	atomic_inc(&ce->pin_count);
	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

	mutex_unlock(&ce->pin_mutex);
	return 0;

err:
	mutex_unlock(&ce->pin_mutex);
	return err;
}

void intel_context_unpin(struct intel_context *ce)
{
	if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
		return;

	/*
	 * We may be called from inside intel_context_pin() to evict another
	 * context.
	 */
	intel_context_get(ce);
	mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);

	if (likely(atomic_dec_and_test(&ce->pin_count))) {
		GEM_TRACE("%s context:%llx retire\n",
			  ce->engine->name, ce->timeline->fence_context);

		ce->ops->unpin(ce);

		i915_gem_context_put(ce->gem_context);
		intel_context_active_release(ce);
	}

	mutex_unlock(&ce->pin_mutex);
	intel_context_put(ce);
}

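/*
 * Added commentary (not from the original file): __context_pin_state() pins
 * the context state object into the GGTT, high in the address space and at
 * or above the GGTT pin bias (PIN_OFFSET_BIAS), and marks it unshrinkable
 * for as long as the context is active; see __intel_context_active() and
 * __intel_context_retire() below.
 */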
static int __context_pin_state(struct i915_vma *vma)
{
	u64 flags;
	int err;

	flags = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
	flags |= PIN_HIGH | PIN_GLOBAL;

	err = i915_vma_pin(vma, 0, 0, flags);
	if (err)
		return err;

	/*
	 * And mark it as a globally pinned object to let the shrinker know
	 * it cannot reclaim the object until we release it.
	 */
	i915_vma_make_unshrinkable(vma);
	vma->obj->mm.dirty = true;

	return 0;
}

static void __context_unpin_state(struct i915_vma *vma)
{
	i915_vma_make_shrinkable(vma);
	__i915_vma_unpin(vma);
}

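/*
 * Added commentary (not from the original file): __intel_context_active()
 * and __intel_context_retire() are the i915_active callbacks registered in
 * intel_context_init() below. The former runs when the context first gains
 * a tracked request (pinning the ring, the timeline and, if present, the
 * state object); the latter runs once all tracked requests have retired and
 * drops those pins together with the self-reference taken in the active
 * callback.
 */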
__i915_active_call
static void __intel_context_retire(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	GEM_TRACE("%s context:%llx retire\n",
		  ce->engine->name, ce->timeline->fence_context);

	if (ce->state)
		__context_unpin_state(ce->state);

	intel_timeline_unpin(ce->timeline);
	intel_ring_unpin(ce->ring);

	intel_context_put(ce);
}

static int __intel_context_active(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);
	int err;

	intel_context_get(ce);

	err = intel_ring_pin(ce->ring);
	if (err)
		goto err_put;

	err = intel_timeline_pin(ce->timeline);
	if (err)
		goto err_ring;

	if (!ce->state)
		return 0;

	err = __context_pin_state(ce->state);
	if (err)
		goto err_timeline;

	return 0;

err_timeline:
	intel_timeline_unpin(ce->timeline);
err_ring:
	intel_ring_unpin(ce->ring);
err_put:
	intel_context_put(ce);
	return err;
}

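/*
 * Added commentary (not from the original file): intel_context_active_acquire()
 * and intel_context_active_release() bracket the period in which ce->active
 * may track requests. At this revision the acquire is expected to be called
 * from the backend ce->ops->pin() implementations (an assumption, not visible
 * in this file), while the release is visible above in intel_context_unpin()
 * on the final unpin. Barrier nodes are preallocated for non-kernel contexts
 * so that idle barriers can later be attached without further allocation.
 */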
int intel_context_active_acquire(struct intel_context *ce)
{
	int err;

	err = i915_active_acquire(&ce->active);
	if (err)
		return err;

	/* Preallocate tracking nodes */
	if (!i915_gem_context_is_kernel(ce->gem_context)) {
		err = i915_active_acquire_preallocate_barrier(&ce->active,
							      ce->engine);
		if (err) {
			i915_active_release(&ce->active);
			return err;
		}
	}

	return 0;
}

void intel_context_active_release(struct intel_context *ce)
{
	/* Nodes preallocated in intel_context_active_acquire() */
	i915_active_acquire_barrier(&ce->active);
	i915_active_release(&ce->active);
}

void
intel_context_init(struct intel_context *ce,
		   struct i915_gem_context *ctx,
		   struct intel_engine_cs *engine)
{
	struct i915_address_space *vm;

	GEM_BUG_ON(!engine->cops);

	kref_init(&ce->ref);

	ce->gem_context = ctx;
	rcu_read_lock();
	vm = rcu_dereference(ctx->vm);
	if (vm)
		ce->vm = i915_vm_get(vm);
	else
		ce->vm = i915_vm_get(&engine->gt->ggtt->vm);
	rcu_read_unlock();
	if (ctx->timeline)
		ce->timeline = intel_timeline_get(ctx->timeline);

	ce->engine = engine;
	ce->ops = engine->cops;
	ce->sseu = engine->sseu;
	ce->ring = __intel_context_ring_size(SZ_16K);

	INIT_LIST_HEAD(&ce->signal_link);
	INIT_LIST_HEAD(&ce->signals);

	mutex_init(&ce->pin_mutex);

	i915_active_init(&ce->active,
			 __intel_context_active, __intel_context_retire);
}

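/*
 * Added commentary (not from the original file): in intel_context_init()
 * above, ce->ring = __intel_context_ring_size(SZ_16K) does not allocate a
 * ring. At this revision __intel_context_ring_size() (intel_context.h) merely
 * encodes the requested size in the pointer value; the real ring is assumed
 * to be created by the backend's ce->ops->alloc() on first pin, via the
 * CONTEXT_ALLOC_BIT path in __intel_context_do_pin().
 */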
void intel_context_fini(struct intel_context *ce)
{
	if (ce->timeline)
		intel_timeline_put(ce->timeline);
	i915_vm_put(ce->vm);

	mutex_destroy(&ce->pin_mutex);
	i915_active_fini(&ce->active);
}

static void i915_global_context_shrink(void)
{
	kmem_cache_shrink(global.slab_ce);
}

static void i915_global_context_exit(void)
{
	kmem_cache_destroy(global.slab_ce);
}

static struct i915_global_context global = { {
	.shrink = i915_global_context_shrink,
	.exit = i915_global_context_exit,
} };

int __init i915_global_context_init(void)
{
	global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
	if (!global.slab_ce)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}

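/*
 * Added commentary (not from the original file): intel_context_enter_engine()
 * and intel_context_exit_engine() take and drop an engine-pm wakeref around a
 * context's active phase and enter/exit its timeline. They are typically wired
 * up as the ce->ops->enter/exit hooks of the submission backends and, at this
 * revision, are expected to be reached via intel_context_enter()/_exit() under
 * ce->timeline->mutex (caller details are an assumption, not visible here).
 */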
void intel_context_enter_engine(struct intel_context *ce)
{
	intel_engine_pm_get(ce->engine);
	intel_timeline_enter(ce->timeline);
}

void intel_context_exit_engine(struct intel_context *ce)
{
	intel_timeline_exit(ce->timeline);
	intel_engine_pm_put(ce->engine);
}

int intel_context_prepare_remote_request(struct intel_context *ce,
					 struct i915_request *rq)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	/* Only suitable for use in remotely modifying this context */
	GEM_BUG_ON(rq->hw_context == ce);

	if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
		/*
		 * Ideally, we just want to insert our foreign fence as
		 * a barrier into the remote context, such that this operation
		 * occurs after all current operations in that context, and
		 * all future operations must occur after this.
		 *
		 * Currently, the timeline->last_request tracking is guarded
		 * by its mutex and so we must obtain that to atomically
		 * insert our barrier. However, since we already hold our
		 * timeline->mutex, we must be careful against potential
		 * inversion if we are the kernel_context as the remote context
		 * will itself poke at the kernel_context when it needs to
		 * unpin. Ergo, if already locked, we drop both locks and
		 * try again (through the magic of userspace repeating EAGAIN).
		 */
		if (!mutex_trylock(&tl->mutex))
			return -EAGAIN;

		/* Queue this switch after current activity by this context. */
		err = i915_active_fence_set(&tl->last_request, rq);
		mutex_unlock(&tl->mutex);
		if (err)
			return err;
	}

	/*
	 * Guarantee that the context image and the timeline remain pinned
	 * until the modifying request is retired, by setting the ce activity
	 * tracker.
	 *
	 * But we only need to take one pin on its account; in other words,
	 * we transfer the pinned ce object to the tracked active request.
	 */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	return i915_active_add_request(&ce->active, rq);
}

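/*
 * An illustrative caller pattern for intel_context_prepare_remote_request()
 * (added commentary, hypothetical and with error handling trimmed): the
 * modifying request is built on another context, the barrier is queued, and
 * -EAGAIN is propagated so that the caller (ultimately userspace, per the
 * comment above) can retry the whole operation:
 *
 *	rq = i915_request_create(ce->engine->kernel_context);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *
 *	err = intel_context_prepare_remote_request(ce, rq);
 *	if (err == 0)
 *		err = emit_remote_update(rq); // hypothetical helper
 *
 *	i915_request_add(rq);
 *	return err;
 */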
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
	struct i915_request *rq;
	int err;

	err = intel_context_pin(ce);
	if (unlikely(err))
		return ERR_PTR(err);

	rq = i915_request_create(ce);
	intel_context_unpin(ce);

	return rq;
}

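/*
 * A minimal usage sketch for intel_context_create_request() (added
 * commentary, illustrative only). The temporary pin above only covers
 * request construction; at this revision the request itself is assumed to
 * hold its own pin on the context until it is retired.
 *
 *	rq = intel_context_create_request(ce);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	... emit commands into rq ...
 *	i915_request_add(rq);
 */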
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif
367