1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * Copyright (C) 2013-2016 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com>
5 */
6
7 #ifndef __MSM_FENCE_H__
8 #define __MSM_FENCE_H__
9
10 #include "msm_drv.h"
11
12 /**
13 * struct msm_fence_context - fence context for gpu
14 *
15 * Each ringbuffer has a single fence context, with the GPU writing an
16 * incrementing fence seqno at the end of each submit
17 */
18 struct msm_fence_context {
19 /** @dev: the drm device */
20 struct drm_device *dev;
21 /** @name: human readable name for fence timeline */
22 char name[32];
23 /** @context: see dma_fence_context_alloc() */
24 unsigned context;
25 /** @index: similar to context, but local to msm_fence_context's */
26 unsigned index;
27 /**
28 * @last_fence:
29 * Last assigned fence, incremented each time a fence is created
30 * on this fence context. If last_fence == completed_fence,
31 * there is no remaining pending work
32 */
33 uint32_t last_fence;
34 /**
35 * @completed_fence:
36 * The last completed fence, updated from the CPU after interrupt
37 * from GPU
38 */
39 uint32_t completed_fence;
40 /**
41 * @fenceptr:
42 * The address that the GPU directly writes with completed fence
43 * seqno. This can be ahead of completed_fence. We can peek at
44 * this to see if a fence has already signaled but the CPU hasn't
45 * gotten around to handling the irq and updating completed_fence
46 */
47 volatile uint32_t *fenceptr;
48
49 /**
50 * @spinlock: fence context spinlock
51 */
52 spinlock_t spinlock;
53
54 /*
55 * TODO this doesn't really deal with multiple deadlines, like
56 * if userspace got multiple frames ahead.. OTOH atomic updates
57 * don't queue, so maybe that is ok
58 */
59
60 /** @next_deadline: Time of next deadline */
61 ktime_t next_deadline;
62 /**
63 * @next_deadline_fence:
64 * Fence value for next pending deadline. The deadline timer is
65 * canceled when this fence is signaled.
66 */
67 uint32_t next_deadline_fence;
68 /**
69 * @deadline_timer: tracks nearest deadline of a fence timeline and
70 * expires just before it.
71 */
72 struct hrtimer deadline_timer;
73 /**
74 * @deadline_work: work to do after deadline_timer expires
75 */
76 struct kthread_work deadline_work;
77 };
78
/* Allocate a fence context; @fenceptr is the GPU-written seqno location.
 * NOTE(review): presumably returns NULL (or ERR_PTR) on failure and the
 * result is released with msm_fence_context_free() — confirm in msm_fence.c.
 */
struct msm_fence_context * msm_fence_context_alloc(struct drm_device *dev,
		volatile uint32_t *fenceptr, const char *name);
void msm_fence_context_free(struct msm_fence_context *fctx);

/* Query whether @fence seqno has completed on @fctx's timeline. */
bool msm_fence_completed(struct msm_fence_context *fctx, uint32_t fence);
/* Advance fctx->completed_fence to @fence (called from irq path). */
void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);

/* Two-step fence construction: allocate first (can fail / sleep), then
 * initialize and bind to a fence context once the seqno is known.
 */
struct dma_fence * msm_fence_alloc(void);
void msm_fence_init(struct dma_fence *fence, struct msm_fence_context *fctx);
88
/*
 * Returns true if seqno @a precedes @b on the timeline.  The subtraction
 * is done in unsigned arithmetic (well-defined wrap) and the sign of the
 * 32-bit difference is inspected, so the comparison stays correct across
 * seqno wraparound as long as the two values are within 2^31 of each other.
 */
static inline bool
fence_before(uint32_t a, uint32_t b)
{
	int32_t diff = (int32_t)(a - b);

	return diff < 0;
}
94
/*
 * Returns true if seqno @a follows @b on the timeline.  Mirror of
 * fence_before(): wraparound-safe via signed interpretation of the
 * unsigned 32-bit difference.
 */
static inline bool
fence_after(uint32_t a, uint32_t b)
{
	int32_t diff = (int32_t)(a - b);

	return diff > 0;
}
100
101 #endif
102