// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/dma-fence.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_gpu.h"

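/* Map a fence context back to the GPU that owns it, via the drm_device. */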
static struct msm_gpu *fctx2gpu(struct msm_fence_context *fctx)
{
	struct msm_drm_private *priv = fctx->dev->dev_private;
	return priv->gpu;
}

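/*
 * hrtimer callback for the fence deadline; runs in timer context, so
 * punt the actual boost work to the GPU's kthread worker.
 */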
static enum hrtimer_restart deadline_timer(struct hrtimer *t)
{
	struct msm_fence_context *fctx = container_of(t,
			struct msm_fence_context, deadline_timer);

	kthread_queue_work(fctx2gpu(fctx)->worker, &fctx->deadline_work);

	return HRTIMER_NORESTART;
}

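/*
 * Deadline worker: boost GPU frequency if the fence we are waiting on
 * has not signaled by (shortly before) its deadline.
 */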
static void deadline_work(struct kthread_work *work)
{
	struct msm_fence_context *fctx = container_of(work,
			struct msm_fence_context, deadline_work);

	/* If deadline fence has already passed, nothing to do: */
	if (msm_fence_completed(fctx, fctx->next_deadline_fence))
		return;

	msm_devfreq_boost(fctx2gpu(fctx), 2);
}

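/*
 * Allocate and initialize a fence context (one per fence timeline).  The
 * @fenceptr points at memory that the GPU updates with the seqno of the
 * last completed fence (see msm_fence_completed()).
 */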
struct msm_fence_context *
msm_fence_context_alloc(struct drm_device *dev, volatile uint32_t *fenceptr,
		const char *name)
{
	struct msm_fence_context *fctx;
	static int index = 0;

	fctx = kzalloc(sizeof(*fctx), GFP_KERNEL);
	if (!fctx)
		return ERR_PTR(-ENOMEM);

	fctx->dev = dev;
	strscpy(fctx->name, name, sizeof(fctx->name));
	fctx->context = dma_fence_context_alloc(1);
	fctx->index = index++;
	fctx->fenceptr = fenceptr;
	spin_lock_init(&fctx->spinlock);

	/*
	 * Start out close to the 32b fence rollover point, so we can
	 * catch bugs with fence comparisons.
	 */
	fctx->last_fence = 0xffffff00;
	fctx->completed_fence = fctx->last_fence;
	*fctx->fenceptr = fctx->last_fence;

	hrtimer_init(&fctx->deadline_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	fctx->deadline_timer.function = deadline_timer;

	kthread_init_work(&fctx->deadline_work, deadline_work);

	fctx->next_deadline = ktime_get();

	return fctx;
}

void msm_fence_context_free(struct msm_fence_context *fctx)
{
	kfree(fctx);
}

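/*
 * Test whether @fence has signaled, comparing seqnos with wrap-safe
 * signed arithmetic (see the rollover comment in
 * msm_fence_context_alloc()).
 */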
bool msm_fence_completed(struct msm_fence_context *fctx, uint32_t fence)
{
	/*
	 * Note: Check completed_fence first, as fenceptr is in a write-combine
	 * mapping, so it will be more expensive to read.
	 */
	return (int32_t)(fctx->completed_fence - fence) >= 0 ||
		(int32_t)(*fctx->fenceptr - fence) >= 0;
}

/*
 * Called from the irq handler, and from the workqueue in the recover path,
 * to advance the completed fence seqno and to cancel the deadline timer
 * once the fence it was armed for has signaled.
 */
void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence)
{
	unsigned long flags;

	spin_lock_irqsave(&fctx->spinlock, flags);
	if (fence_after(fence, fctx->completed_fence))
		fctx->completed_fence = fence;
	if (msm_fence_completed(fctx, fctx->next_deadline_fence))
		hrtimer_cancel(&fctx->deadline_timer);
	spin_unlock_irqrestore(&fctx->spinlock, flags);
}

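/* A dma_fence plus a backpointer to the timeline (context) it belongs to. */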
struct msm_fence {
	struct dma_fence base;
	struct msm_fence_context *fctx;
};

static inline struct msm_fence *to_msm_fence(struct dma_fence *fence)
{
	return container_of(fence, struct msm_fence, base);
}

static const char *msm_fence_get_driver_name(struct dma_fence *fence)
{
	return "msm";
}

static const char *msm_fence_get_timeline_name(struct dma_fence *fence)
{
	struct msm_fence *f = to_msm_fence(fence);
	return f->fctx->name;
}

static bool msm_fence_signaled(struct dma_fence *fence)
{
	struct msm_fence *f = to_msm_fence(fence);
	return msm_fence_completed(f->fctx, f->base.seqno);
}

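/*
 * dma_fence_ops::set_deadline implementation: remember the earliest
 * requested deadline, and arm a timer to boost the GPU clocks shortly
 * before it so the fence has a better chance of signaling in time.
 */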
static void msm_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
{
	struct msm_fence *f = to_msm_fence(fence);
	struct msm_fence_context *fctx = f->fctx;
	unsigned long flags;
	ktime_t now;

	spin_lock_irqsave(&fctx->spinlock, flags);
	now = ktime_get();

	if (ktime_after(now, fctx->next_deadline) ||
			ktime_before(deadline, fctx->next_deadline)) {
		fctx->next_deadline = deadline;
		fctx->next_deadline_fence =
			max(fctx->next_deadline_fence, (uint32_t)fence->seqno);

		/*
		 * Set the timer to trigger the boost 3ms before the deadline;
		 * if we are already within 3ms of the deadline, schedule the
		 * boost work immediately.
		 */
		deadline = ktime_sub(deadline, ms_to_ktime(3));

		if (ktime_after(now, deadline)) {
			kthread_queue_work(fctx2gpu(fctx)->worker,
					&fctx->deadline_work);
		} else {
			hrtimer_start(&fctx->deadline_timer, deadline,
					HRTIMER_MODE_ABS);
		}
	}

	spin_unlock_irqrestore(&fctx->spinlock, flags);
}

static const struct dma_fence_ops msm_fence_ops = {
	.get_driver_name = msm_fence_get_driver_name,
	.get_timeline_name = msm_fence_get_timeline_name,
	.signaled = msm_fence_signaled,
	.set_deadline = msm_fence_set_deadline,
};

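/*
 * Allocation is split from initialization (msm_fence_init()) so that the
 * fence can be allocated at a point where -ENOMEM is easy to handle,
 * before the seqno is actually assigned.
 */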
struct dma_fence *
msm_fence_alloc(void)
{
	struct msm_fence *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return ERR_PTR(-ENOMEM);

	return &f->base;
}

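/*
 * Bind a pre-allocated fence to its context and assign it the next seqno
 * on the timeline.  Note that ++fctx->last_fence is not locked here, so
 * callers are assumed to serialize fence initialization on a given
 * timeline.
 */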
void
msm_fence_init(struct dma_fence *fence, struct msm_fence_context *fctx)
{
	struct msm_fence *f = to_msm_fence(fence);

	f->fctx = fctx;

	/*
	 * Until this point, the fence was just some pre-allocated memory,
	 * no-one should have taken a reference to it yet.
	 */
	WARN_ON(kref_read(&fence->refcount));

	dma_fence_init(&f->base, &msm_fence_ops, &fctx->spinlock,
		       fctx->context, ++fctx->last_fence);
}
203