xref: /linux/drivers/gpu/drm/xe/xe_hw_fence.c (revision d53b8e36925256097a08d7cb749198d85cbf9b2b)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_hw_fence.h"

#include <linux/device.h>
#include <linux/slab.h>

#include "xe_bo.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_hw_engine.h"
#include "xe_macros.h"
#include "xe_map.h"
#include "xe_trace.h"

static struct kmem_cache *xe_hw_fence_slab;

int __init xe_hw_fence_module_init(void)
{
	xe_hw_fence_slab = kmem_cache_create("xe_hw_fence",
					     sizeof(struct xe_hw_fence), 0,
					     SLAB_HWCACHE_ALIGN, NULL);
	if (!xe_hw_fence_slab)
		return -ENOMEM;

	return 0;
}

void xe_hw_fence_module_exit(void)
{
	rcu_barrier();
	kmem_cache_destroy(xe_hw_fence_slab);
}

static struct xe_hw_fence *fence_alloc(void)
{
	return kmem_cache_zalloc(xe_hw_fence_slab, GFP_KERNEL);
}

static void fence_free(struct rcu_head *rcu)
{
	struct xe_hw_fence *fence =
		container_of(rcu, struct xe_hw_fence, dma.rcu);

	if (!WARN_ON_ONCE(!fence))
		kmem_cache_free(xe_hw_fence_slab, fence);
}

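/*
 * irq_work callback: walk the pending list under the fence irq lock and
 * signal every fence whose seqno has already landed in memory, dropping the
 * reference taken when signaling was enabled. Skipped entirely while the
 * fence irq is stopped (irq->enabled == false).
 */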
static void hw_fence_irq_run_cb(struct irq_work *work)
{
	struct xe_hw_fence_irq *irq = container_of(work, typeof(*irq), work);
	struct xe_hw_fence *fence, *next;
	bool tmp;

	tmp = dma_fence_begin_signalling();
	spin_lock(&irq->lock);
	if (irq->enabled) {
		list_for_each_entry_safe(fence, next, &irq->pending, irq_link) {
			struct dma_fence *dma_fence = &fence->dma;

			trace_xe_hw_fence_try_signal(fence);
			if (dma_fence_is_signaled_locked(dma_fence)) {
				trace_xe_hw_fence_signal(fence);
				list_del_init(&fence->irq_link);
				dma_fence_put(dma_fence);
			}
		}
	}
	spin_unlock(&irq->lock);
	dma_fence_end_signalling(tmp);
}

void xe_hw_fence_irq_init(struct xe_hw_fence_irq *irq)
{
	spin_lock_init(&irq->lock);
	init_irq_work(&irq->work, hw_fence_irq_run_cb);
	INIT_LIST_HEAD(&irq->pending);
	irq->enabled = true;
}

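/*
 * Tear down a fence irq handler. Any fences still on the pending list at
 * this point are force signaled (and warned about), since nothing will
 * process them afterwards.
 */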
void xe_hw_fence_irq_finish(struct xe_hw_fence_irq *irq)
{
	struct xe_hw_fence *fence, *next;
	unsigned long flags;
	int err;
	bool tmp;

	if (XE_WARN_ON(!list_empty(&irq->pending))) {
		tmp = dma_fence_begin_signalling();
		spin_lock_irqsave(&irq->lock, flags);
		list_for_each_entry_safe(fence, next, &irq->pending, irq_link) {
			list_del_init(&fence->irq_link);
			err = dma_fence_signal_locked(&fence->dma);
			dma_fence_put(&fence->dma);
			XE_WARN_ON(err);
		}
		spin_unlock_irqrestore(&irq->lock, flags);
		dma_fence_end_signalling(tmp);
	}
}

void xe_hw_fence_irq_run(struct xe_hw_fence_irq *irq)
{
	irq_work_queue(&irq->work);
}

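/*
 * Stop processing of pending fences: the irq worker leaves the pending list
 * untouched until xe_hw_fence_irq_start() is called.
 */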
void xe_hw_fence_irq_stop(struct xe_hw_fence_irq *irq)
{
	spin_lock_irq(&irq->lock);
	irq->enabled = false;
	spin_unlock_irq(&irq->lock);
}

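/*
 * Re-enable fence signaling and kick the irq worker to catch up on any
 * fences that completed while processing was stopped.
 */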
void xe_hw_fence_irq_start(struct xe_hw_fence_irq *irq)
{
	spin_lock_irq(&irq->lock);
	irq->enabled = true;
	spin_unlock_irq(&irq->lock);

	irq_work_queue(&irq->work);
}

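/*
 * Initialize a fence context: bind it to a GT and a fence irq handler,
 * allocate a fresh dma-fence context id, seed the seqno counter and record
 * the name that initialized fences will report as their timeline name.
 */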
void xe_hw_fence_ctx_init(struct xe_hw_fence_ctx *ctx, struct xe_gt *gt,
			  struct xe_hw_fence_irq *irq, const char *name)
{
	ctx->gt = gt;
	ctx->irq = irq;
	ctx->dma_fence_ctx = dma_fence_context_alloc(1);
	ctx->next_seqno = XE_FENCE_INITIAL_SEQNO;
	snprintf(ctx->name, sizeof(ctx->name), "%s", name);
}

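/*
 * Currently a no-op; presumably kept as the teardown counterpart to
 * xe_hw_fence_ctx_init() so callers have a matching finish call.
 */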
void xe_hw_fence_ctx_finish(struct xe_hw_fence_ctx *ctx)
{
}

static struct xe_hw_fence *to_xe_hw_fence(struct dma_fence *fence);

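/*
 * The fence was initialized with &irq->lock as its dma-fence lock, so the
 * owning fence irq handler can be recovered from that lock pointer.
 */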
static struct xe_hw_fence_irq *xe_hw_fence_irq(struct xe_hw_fence *fence)
{
	return container_of(fence->dma.lock, struct xe_hw_fence_irq, lock);
}

static const char *xe_hw_fence_get_driver_name(struct dma_fence *dma_fence)
{
	struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence);

	return dev_name(fence->xe->drm.dev);
}

static const char *xe_hw_fence_get_timeline_name(struct dma_fence *dma_fence)
{
	struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence);

	return fence->name;
}

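/*
 * A fence counts as signaled once an error has been set on it or once the
 * seqno written to memory (read through fence->seqno_map) has caught up
 * with the fence's own seqno.
 */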
static bool xe_hw_fence_signaled(struct dma_fence *dma_fence)
{
	struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence);
	struct xe_device *xe = fence->xe;
	u32 seqno = xe_map_rd(xe, &fence->seqno_map, 0, u32);

	return dma_fence->error ||
		!__dma_fence_is_later(dma_fence->seqno, seqno, dma_fence->ops);
}

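/*
 * Called by dma-fence core with the fence lock (irq->lock) held. Take an
 * extra reference, queue the fence on the pending list and, if the seqno
 * has already been written, kick the irq worker so the fence gets signaled
 * even though no further hardware interrupt will arrive for it.
 */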
static bool xe_hw_fence_enable_signaling(struct dma_fence *dma_fence)
{
	struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence);
	struct xe_hw_fence_irq *irq = xe_hw_fence_irq(fence);

	dma_fence_get(dma_fence);
	list_add_tail(&fence->irq_link, &irq->pending);

	/* SW completed (no HW IRQ) so kick handler to signal fence */
	if (xe_hw_fence_signaled(dma_fence))
		xe_hw_fence_irq_run(irq);

	return true;
}

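/*
 * Last reference dropped: the fence must no longer sit on a pending list,
 * and the memory is freed via RCU so that lockless dma-fence lookups still
 * holding an RCU reference remain safe.
 */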
static void xe_hw_fence_release(struct dma_fence *dma_fence)
{
	struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence);

	XE_WARN_ON(!list_empty(&fence->irq_link));
	call_rcu(&dma_fence->rcu, fence_free);
}

static const struct dma_fence_ops xe_hw_fence_ops = {
	.get_driver_name = xe_hw_fence_get_driver_name,
	.get_timeline_name = xe_hw_fence_get_timeline_name,
	.enable_signaling = xe_hw_fence_enable_signaling,
	.signaled = xe_hw_fence_signaled,
	.release = xe_hw_fence_release,
};

static struct xe_hw_fence *to_xe_hw_fence(struct dma_fence *fence)
{
	if (XE_WARN_ON(fence->ops != &xe_hw_fence_ops))
		return NULL;

	return container_of(fence, struct xe_hw_fence, dma);
}

/**
 * xe_hw_fence_alloc() - Allocate a hw fence.
 *
 * Allocate but don't initialize a hw fence.
 *
 * Return: Pointer to the allocated fence or a
 * negative error pointer on error.
 */
struct dma_fence *xe_hw_fence_alloc(void)
{
	struct xe_hw_fence *hw_fence = fence_alloc();

	if (!hw_fence)
		return ERR_PTR(-ENOMEM);

	return &hw_fence->dma;
}

/**
 * xe_hw_fence_free() - Free a hw fence.
 * @fence: Pointer to the fence to free.
 *
 * Frees a hw fence that hasn't yet been
 * initialized.
 */
void xe_hw_fence_free(struct dma_fence *fence)
{
	fence_free(&fence->rcu);
}

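/*
 * Illustrative only (not taken from this file): a caller would typically
 * check the ERR_PTR return from xe_hw_fence_alloc() and hand the fence back
 * to xe_hw_fence_free() on any unwind path taken before xe_hw_fence_init():
 *
 *	struct dma_fence *fence = xe_hw_fence_alloc();
 *
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	...
 *	if (setup_failed)	// hypothetical error path
 *		xe_hw_fence_free(fence);
 */
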
/**
 * xe_hw_fence_init() - Initialize a hw fence.
 * @fence: Pointer to the fence to initialize.
 * @ctx: Pointer to the struct xe_hw_fence_ctx fence context.
 * @seqno_map: Map to the memory location into which the seqno is blitted.
 *
 * Initializes a pre-allocated hw fence.
 * After initialization, the fence is subject to normal
 * dma-fence refcounting.
 */
void xe_hw_fence_init(struct dma_fence *fence, struct xe_hw_fence_ctx *ctx,
		      struct iosys_map seqno_map)
{
	struct xe_hw_fence *hw_fence =
		container_of(fence, typeof(*hw_fence), dma);

	hw_fence->xe = gt_to_xe(ctx->gt);
	snprintf(hw_fence->name, sizeof(hw_fence->name), "%s", ctx->name);
	hw_fence->seqno_map = seqno_map;
	INIT_LIST_HEAD(&hw_fence->irq_link);

	dma_fence_init(fence, &xe_hw_fence_ops, &ctx->irq->lock,
		       ctx->dma_fence_ctx, ctx->next_seqno++);

	trace_xe_hw_fence_create(hw_fence);
}
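
/*
 * Illustrative only (not part of this file): as far as this file is
 * concerned, the expected lifecycle is alloc -> init -> the hardware writes
 * the seqno into the mapped memory -> the GT interrupt path (or any caller)
 * runs the fence irq to signal waiters. The ctx, irq and seqno_map setup is
 * assumed to exist on the caller's side:
 *
 *	struct dma_fence *fence = xe_hw_fence_alloc();
 *
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *	xe_hw_fence_init(fence, ctx, seqno_map);
 *	...
 *	// once the hardware has written a seqno >= fence->seqno:
 *	xe_hw_fence_irq_run(ctx->irq);
 */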
266