xref: /linux/drivers/gpu/drm/nouveau/nouveau_fence.c (revision d40981350844c2cfa437abfc80596e10ea8f1149)
/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <trace/events/dma_fence.h>

#include <nvif/if0020.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

static const struct dma_fence_ops nouveau_fence_ops_uevent;
static const struct dma_fence_ops nouveau_fence_ops_legacy;

static inline struct nouveau_fence *
from_fence(struct dma_fence *fence)
{
	return container_of(fence, struct nouveau_fence, base);
}

static inline struct nouveau_fence_chan *
nouveau_fctx(struct nouveau_fence *fence)
{
	return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
}
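
/*
 * Signal the fence and remove it from the pending list; fctx->lock must be
 * held by the caller.  Returns nonzero when this drops the last notify_ref,
 * i.e. when the caller should block the non-stall interrupt via
 * nvif_event_block().
 */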
static int
nouveau_fence_signal(struct nouveau_fence *fence)
{
	int drop = 0;

	dma_fence_signal_locked(&fence->base);
	list_del(&fence->head);
	rcu_assign_pointer(fence->channel, NULL);

	if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) {
		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

		if (!--fctx->notify_ref)
			drop = 1;
	}

	dma_fence_put(&fence->base);
	return drop;
}

static struct nouveau_fence *
nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm)
{
	if (fence->ops != &nouveau_fence_ops_legacy &&
	    fence->ops != &nouveau_fence_ops_uevent)
		return NULL;

	return from_fence(fence);
}
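
/*
 * Fail every fence still pending on the channel: set the error (if any),
 * signal each fence, and mark the context killed so nouveau_fence_emit()
 * rejects new fences with -ENODEV.
 */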
void
nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
{
	struct nouveau_fence *fence;
	unsigned long flags;

	spin_lock_irqsave(&fctx->lock, flags);
	while (!list_empty(&fctx->pending)) {
		fence = list_entry(fctx->pending.next, typeof(*fence), head);

		if (error)
			dma_fence_set_error(&fence->base, error);

		if (nouveau_fence_signal(fence))
			nvif_event_block(&fctx->event);
	}
	fctx->killed = 1;
	spin_unlock_irqrestore(&fctx->lock, flags);
}

void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
	cancel_work_sync(&fctx->uevent_work);
	nouveau_fence_context_kill(fctx, 0);
	nvif_event_dtor(&fctx->event);
	fctx->dead = 1;

	/*
	 * Ensure that all accesses to fence->channel complete before freeing
	 * the channel.
	 */
	synchronize_rcu();
}

static void
nouveau_fence_context_put(struct kref *fence_ref)
{
	kfree(container_of(fence_ref, struct nouveau_fence_chan, fence_ref));
}

void
nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
{
	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
}
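
/*
 * Signal all pending fences the hardware has already passed; called with
 * fctx->lock held.  The signed comparison below keeps the seqno check
 * correct across 32-bit sequence wraparound.  Returns nonzero when the
 * non-stall interrupt can be blocked again.
 */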
static int
nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence *fence;
	int drop = 0;
	u32 seq = fctx->read(chan);

	while (!list_empty(&fctx->pending)) {
		fence = list_entry(fctx->pending.next, typeof(*fence), head);

		if ((int)(seq - fence->base.seqno) < 0)
			break;

		drop |= nouveau_fence_signal(fence);
	}

	return drop;
}
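
/*
 * Bottom half for the non-stall interrupt: scheduled from the nvif event
 * handler, it reaps finished fences and blocks the event once it is no
 * longer needed.
 */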
static void
nouveau_fence_uevent_work(struct work_struct *work)
{
	struct nouveau_fence_chan *fctx = container_of(work, struct nouveau_fence_chan,
						       uevent_work);
	unsigned long flags;
	int drop = 0;

	spin_lock_irqsave(&fctx->lock, flags);
	if (!list_empty(&fctx->pending)) {
		struct nouveau_fence *fence;
		struct nouveau_channel *chan;

		fence = list_entry(fctx->pending.next, typeof(*fence), head);
		chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
		if (nouveau_fence_update(chan, fctx))
			drop = 1;
	}
	if (drop)
		nvif_event_block(&fctx->event);

	spin_unlock_irqrestore(&fctx->lock, flags);
}

static int
nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc)
{
	struct nouveau_fence_chan *fctx = container_of(event, typeof(*fctx), event);

	schedule_work(&fctx->uevent_work);
	return NVIF_EVENT_KEEP;
}
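
/*
 * Set up the per-channel fence state and, when the backend supports it
 * (priv->uevent), register a non-stall interrupt event so fence completion
 * can be driven by an interrupt rather than by polling.
 */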
void
nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
	struct nouveau_fence_priv *priv = (void *)chan->drm->fence;
	struct nouveau_cli *cli = (void *)chan->user.client;
	struct {
		struct nvif_event_v0 base;
		struct nvif_chan_event_v0 host;
	} args;
	int ret;

	INIT_WORK(&fctx->uevent_work, nouveau_fence_uevent_work);
	INIT_LIST_HEAD(&fctx->flip);
	INIT_LIST_HEAD(&fctx->pending);
	spin_lock_init(&fctx->lock);
	fctx->context = chan->drm->runl[chan->runlist].context_base + chan->chid;

	if (chan == chan->drm->cechan)
		strcpy(fctx->name, "copy engine channel");
	else if (chan == chan->drm->channel)
		strcpy(fctx->name, "generic kernel channel");
	else
		strcpy(fctx->name, nvxx_client(&cli->base)->name);

	kref_init(&fctx->fence_ref);
	if (!priv->uevent)
		return;

	args.host.version = 0;
	args.host.type = NVIF_CHAN_EVENT_V0_NON_STALL_INTR;

	ret = nvif_event_ctor(&chan->user, "fenceNonStallIntr", (chan->runlist << 16) | chan->chid,
			      nouveau_fence_wait_uevent_handler, false,
			      &args.base, sizeof(args), &fctx->event);

	WARN_ON(ret);
}
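
/*
 * Initialize the dma_fence with the channel's next sequence number and emit
 * it to the channel.  On success the fence holds an extra reference while it
 * sits on the pending list; nouveau_fence_signal() drops that reference
 * again.  Fails with -ENODEV if the context was killed in the meantime.
 */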
int
nouveau_fence_emit(struct nouveau_fence *fence)
{
	struct nouveau_channel *chan = unrcu_pointer(fence->channel);
	struct nouveau_fence_chan *fctx = chan->fence;
	struct nouveau_fence_priv *priv = (void *)chan->drm->fence;
	int ret;

	fence->timeout = jiffies + (15 * HZ);

	if (priv->uevent)
		dma_fence_init(&fence->base, &nouveau_fence_ops_uevent,
			       &fctx->lock, fctx->context, ++fctx->sequence);
	else
		dma_fence_init(&fence->base, &nouveau_fence_ops_legacy,
			       &fctx->lock, fctx->context, ++fctx->sequence);
	kref_get(&fctx->fence_ref);

	ret = fctx->emit(fence);
	if (!ret) {
		dma_fence_get(&fence->base);
		spin_lock_irq(&fctx->lock);

		if (unlikely(fctx->killed)) {
			spin_unlock_irq(&fctx->lock);
			dma_fence_put(&fence->base);
			return -ENODEV;
		}

		if (nouveau_fence_update(chan, fctx))
			nvif_event_block(&fctx->event);

		list_add_tail(&fence->head, &fctx->pending);
		spin_unlock_irq(&fctx->lock);
	}

	return ret;
}
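
/*
 * Check whether the fence has signaled, opportunistically reaping any other
 * fences the hardware has finished in the meantime.
 */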
bool
nouveau_fence_done(struct nouveau_fence *fence)
{
	if (fence->base.ops == &nouveau_fence_ops_legacy ||
	    fence->base.ops == &nouveau_fence_ops_uevent) {
		struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
		struct nouveau_channel *chan;
		unsigned long flags;

		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
			return true;

		spin_lock_irqsave(&fctx->lock, flags);
		chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
		if (chan && nouveau_fence_update(chan, fctx))
			nvif_event_block(&fctx->event);
		spin_unlock_irqrestore(&fctx->lock, flags);
	}
	return dma_fence_is_signaled(&fence->base);
}
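
/*
 * Fallback wait for backends without the non-stall interrupt: poll with
 * exponential backoff, starting at 1 us and capping at 1 ms per sleep.
 */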
static long
nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait)
{
	struct nouveau_fence *fence = from_fence(f);
	unsigned long sleep_time = NSEC_PER_MSEC / 1000;
	unsigned long t = jiffies, timeout = t + wait;

	while (!nouveau_fence_done(fence)) {
		ktime_t kt;

		t = jiffies;

		if (wait != MAX_SCHEDULE_TIMEOUT && time_after_eq(t, timeout)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}

		__set_current_state(intr ? TASK_INTERRUPTIBLE :
					   TASK_UNINTERRUPTIBLE);

		kt = sleep_time;
		schedule_hrtimeout(&kt, HRTIMER_MODE_REL);
		sleep_time *= 2;
		if (sleep_time > NSEC_PER_MSEC)
			sleep_time = NSEC_PER_MSEC;

		if (intr && signal_pending(current))
			return -ERESTARTSYS;
	}

	__set_current_state(TASK_RUNNING);

	return timeout - t;
}
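
/*
 * Block until the fence signals, for at most 15 seconds.  Returns 0 on
 * success, -EBUSY on timeout, or a negative error such as -ERESTARTSYS when
 * interrupted.
 */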
int
nouveau_fence_wait(struct nouveau_fence *fence, bool intr)
{
	long ret;

	ret = dma_fence_wait_timeout(&fence->base, intr, 15 * HZ);
	if (ret < 0)
		return ret;
	else if (!ret)
		return -EBUSY;
	else
		return 0;
}
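
/*
 * Make the channel wait for the fences attached to the buffer object, reads
 * first and writes second (see the comment below).  A fence from another
 * nouveau channel is first offered to the backend's ->sync() hook, giving it
 * a chance to synchronize without blocking the CPU; everything else falls
 * back to dma_fence_wait().
 */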
int
nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
		   bool exclusive, bool intr)
{
	struct nouveau_fence_chan *fctx = chan->fence;
	struct dma_resv *resv = nvbo->bo.base.resv;
	int i, ret;

	ret = dma_resv_reserve_fences(resv, 1);
	if (ret)
		return ret;

	/* Waiting for the writes first causes performance regressions
	 * under some circumstances. So manually wait for the reads first.
	 */
	for (i = 0; i < 2; ++i) {
		struct dma_resv_iter cursor;
		struct dma_fence *fence;

		dma_resv_for_each_fence(&cursor, resv,
					dma_resv_usage_rw(exclusive),
					fence) {
			enum dma_resv_usage usage;
			struct nouveau_fence *f;

			usage = dma_resv_iter_usage(&cursor);
			if (i == 0 && usage == DMA_RESV_USAGE_WRITE)
				continue;

			f = nouveau_local_fence(fence, chan->drm);
			if (f) {
				struct nouveau_channel *prev;
				bool must_wait = true;

				rcu_read_lock();
				prev = rcu_dereference(f->channel);
				if (prev && (prev == chan ||
					     fctx->sync(f, prev, chan) == 0))
					must_wait = false;
				rcu_read_unlock();
				if (!must_wait)
					continue;
			}

			ret = dma_fence_wait(fence, intr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
	if (*pfence)
		dma_fence_put(&(*pfence)->base);
	*pfence = NULL;
}

int
nouveau_fence_create(struct nouveau_fence **pfence,
		     struct nouveau_channel *chan)
{
	struct nouveau_fence *fence;

	if (unlikely(!chan->fence))
		return -ENODEV;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return -ENOMEM;

	fence->channel = chan;

	*pfence = fence;
	return 0;
}
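
/*
 * Convenience wrapper: allocate a fence on the channel and emit it
 * immediately, cleaning up again if the emit fails.
 */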
int
nouveau_fence_new(struct nouveau_fence **pfence,
		  struct nouveau_channel *chan)
{
	int ret = 0;

	ret = nouveau_fence_create(pfence, chan);
	if (ret)
		return ret;

	ret = nouveau_fence_emit(*pfence);
	if (ret)
		nouveau_fence_unref(pfence);

	return ret;
}

static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence)
{
	return "nouveau";
}

static const char *nouveau_fence_get_timeline_name(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

	return !fctx->dead ? fctx->name : "dead channel";
}

/*
 * Ideally, ->read() would not assume the channel context is still alive.
 * This callback may be invoked on behalf of another device and can therefore
 * end up reading freed memory once the channel is gone. The DRM node should
 * still be there, so we can derive the index from the fence context.
 */
static bool nouveau_fence_is_signaled(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
	struct nouveau_channel *chan;
	bool ret = false;

	rcu_read_lock();
	chan = rcu_dereference(fence->channel);
	if (chan)
		ret = (int)(fctx->read(chan) - fence->base.seqno) >= 0;
	rcu_read_unlock();

	return ret;
}

static bool nouveau_fence_no_signaling(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);

	/*
	 * The caller should hold a reference on the fence;
	 * otherwise the fence could get freed here.
	 */
	WARN_ON(kref_read(&fence->base.refcount) <= 1);

	/*
	 * This needs uevents to work correctly, but dma_fence_add_callback relies on
	 * being able to enable signaling. It will still get signaled eventually,
	 * just not right away.
	 */
	if (nouveau_fence_is_signaled(f)) {
		list_del(&fence->head);

		dma_fence_put(&fence->base);
		return false;
	}

	return true;
}

static void nouveau_fence_release(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

	kref_put(&fctx->fence_ref, nouveau_fence_context_put);
	dma_fence_free(&fence->base);
}

static const struct dma_fence_ops nouveau_fence_ops_legacy = {
	.get_driver_name = nouveau_fence_get_get_driver_name,
	.get_timeline_name = nouveau_fence_get_timeline_name,
	.enable_signaling = nouveau_fence_no_signaling,
	.signaled = nouveau_fence_is_signaled,
	.wait = nouveau_fence_wait_legacy,
	.release = nouveau_fence_release
};
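
/*
 * Interrupt-driven variant of ->enable_signaling: take a notify_ref so the
 * non-stall interrupt stays enabled while anyone is waiting, and tag the
 * fence with DMA_FENCE_FLAG_USER_BITS so nouveau_fence_signal() knows to
 * drop that reference again.
 */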
static bool nouveau_fence_enable_signaling(struct dma_fence *f)
{
	struct nouveau_fence *fence = from_fence(f);
	struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
	bool ret;

	if (!fctx->notify_ref++)
		nvif_event_allow(&fctx->event);

	ret = nouveau_fence_no_signaling(f);
	if (ret)
		set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags);
	else if (!--fctx->notify_ref)
		nvif_event_block(&fctx->event);

	return ret;
}

static const struct dma_fence_ops nouveau_fence_ops_uevent = {
	.get_driver_name = nouveau_fence_get_get_driver_name,
	.get_timeline_name = nouveau_fence_get_timeline_name,
	.enable_signaling = nouveau_fence_enable_signaling,
	.signaled = nouveau_fence_is_signaled,
	.release = nouveau_fence_release
};