// SPDX-License-Identifier: GPL-2.0-only
/*
 * dma-fence-array: aggregate fences to be waited together
 *
 * Copyright (C) 2016 Collabora Ltd
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 * Authors:
 *	Gustavo Padovan <gustavo@padovan.org>
 *	Christian König <christian.koenig@amd.com>
 */

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dma-fence-array.h>

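/*
 * base.error is initialized to the PENDING_ERROR placeholder below. The
 * first real error reported by any member fence replaces it via cmpxchg(),
 * and it is cleared back to 0 at signaling time if no error was recorded.
 */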
#define PENDING_ERROR 1

static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
{
	return "dma_fence_array";
}

static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
{
	return "unbound";
}

static void dma_fence_array_set_pending_error(struct dma_fence_array *array,
					      int error)
{
	/*
	 * Propagate the first error reported by any of our fences, but only
	 * before we ourselves are signaled.
	 */
	if (error)
		cmpxchg(&array->base.error, PENDING_ERROR, error);
}

static void dma_fence_array_clear_pending_error(struct dma_fence_array *array)
{
	/* Clear the PENDING_ERROR placeholder if no real error was set. */
	cmpxchg(&array->base.error, PENDING_ERROR, 0);
}

static void irq_dma_fence_array_work(struct irq_work *wrk)
{
	struct dma_fence_array *array = container_of(wrk, typeof(*array), work);

	dma_fence_array_clear_pending_error(array);

	dma_fence_signal(&array->base);
	dma_fence_put(&array->base);
}

static void dma_fence_array_cb_func(struct dma_fence *f,
				    struct dma_fence_cb *cb)
{
	struct dma_fence_array_cb *array_cb =
		container_of(cb, struct dma_fence_array_cb, cb);
	struct dma_fence_array *array = array_cb->array;

	dma_fence_array_set_pending_error(array, f->error);

	if (atomic_dec_and_test(&array->num_pending))
		irq_work_queue(&array->work);
	else
		dma_fence_put(&array->base);
}

static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);
	struct dma_fence_array_cb *cb = (void *)(&array[1]);
	unsigned int i;

	for (i = 0; i < array->num_fences; ++i) {
		cb[i].array = array;
		/*
		 * As we may report that the fence is signaled before all
		 * callbacks are complete, we need to take an additional
		 * reference count on the array so that we do not free it too
		 * early. The core fence handling will only hold the reference
		 * until we signal the array as complete (but that is now
		 * insufficient).
		 */
		dma_fence_get(&array->base);
		if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
					   dma_fence_array_cb_func)) {
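			/*
			 * Adding the callback failed, which means this fence
			 * has already signaled; account for it immediately.
			 */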
			int error = array->fences[i]->error;

			dma_fence_array_set_pending_error(array, error);
			dma_fence_put(&array->base);
			if (atomic_dec_and_test(&array->num_pending)) {
				dma_fence_array_clear_pending_error(array);
				return false;
			}
		}
	}

	return true;
}

static bool dma_fence_array_signaled(struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);

	if (atomic_read(&array->num_pending) > 0)
		return false;

	dma_fence_array_clear_pending_error(array);
	return true;
}

static void dma_fence_array_release(struct dma_fence *fence)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);
	unsigned int i;

	for (i = 0; i < array->num_fences; ++i)
		dma_fence_put(array->fences[i]);

	kfree(array->fences);
	dma_fence_free(fence);
}

const struct dma_fence_ops dma_fence_array_ops = {
	.get_driver_name = dma_fence_array_get_driver_name,
	.get_timeline_name = dma_fence_array_get_timeline_name,
	.enable_signaling = dma_fence_array_enable_signaling,
	.signaled = dma_fence_array_signaled,
	.release = dma_fence_array_release,
};
EXPORT_SYMBOL(dma_fence_array_ops);

/**
 * dma_fence_array_create - Create a custom fence array
 * @num_fences:		[in]	number of fences to add in the array
 * @fences:		[in]	array containing the fences
 * @context:		[in]	fence context to use
 * @seqno:		[in]	sequence number to use
 * @signal_on_any:	[in]	signal on any fence in the array
 *
 * Allocate a dma_fence_array object and initialize the base fence with
 * dma_fence_init(). Returns NULL on allocation failure.
 *
 * The caller should allocate the fences array with num_fences entries
 * and fill it with the fences it wants to add to the object. Ownership of
 * this array is taken over and dma_fence_put() is called on each fence on
 * release.
 *
 * If @signal_on_any is true the fence array signals if any fence in the array
 * signals, otherwise it signals when all fences in the array signal.
 */
struct dma_fence_array *dma_fence_array_create(int num_fences,
					       struct dma_fence **fences,
					       u64 context, unsigned seqno,
					       bool signal_on_any)
{
	struct dma_fence_array *array;
	size_t size = sizeof(*array);

	WARN_ON(!num_fences || !fences);

	/* Allocate the callback structures behind the array. */
	size += num_fences * sizeof(struct dma_fence_array_cb);
	array = kzalloc(size, GFP_KERNEL);
	if (!array)
		return NULL;

	spin_lock_init(&array->lock);
	dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
		       context, seqno);
	init_irq_work(&array->work, irq_dma_fence_array_work);

	array->num_fences = num_fences;
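	/* With signal_on_any a single signaled fence completes the array. */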
	atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
	array->fences = fences;

	array->base.error = PENDING_ERROR;

	/*
	 * dma_fence_array objects should never contain any other fence
	 * containers; otherwise we run into recursion and a potential kernel
	 * stack overflow when operating on the dma_fence_array.
	 *
	 * The correct way of handling this is to flatten out the array by the
	 * caller instead.
	 *
	 * Enforce this here by checking that we don't create a dma_fence_array
	 * with any container inside.
	 */
	while (num_fences--)
		WARN_ON(dma_fence_is_container(fences[num_fences]));

	return array;
}
EXPORT_SYMBOL(dma_fence_array_create);
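
/*
 * Illustrative sketch, not part of the original file: a typical caller
 * merging two fences it holds references to into a single fence that
 * signals once both have signaled. The function name and error handling
 * are assumptions for the example; the ownership rules follow the
 * kernel-doc above (the array takes over both the fences[] buffer and
 * the fence references on success).
 */
static struct dma_fence *example_merge_fences(struct dma_fence *a,
					      struct dma_fence *b)
{
	struct dma_fence **fences;
	struct dma_fence_array *array;

	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
	if (!fences)
		return NULL;

	fences[0] = dma_fence_get(a);
	fences[1] = dma_fence_get(b);

	array = dma_fence_array_create(2, fences,
				       dma_fence_context_alloc(1), 1,
				       false);
	if (!array) {
		/* On failure ownership was not transferred; clean up. */
		dma_fence_put(fences[0]);
		dma_fence_put(fences[1]);
		kfree(fences);
		return NULL;
	}

	return &array->base;
}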

/**
 * dma_fence_match_context - Check if all fences are from the given context
 * @fence:		[in]	fence or fence array
 * @context:		[in]	fence context to check all fences against
 *
 * Checks the provided fence or, for a fence array, all fences in the array
 * against the given context. Returns false if any fence is from a different
 * context.
 */
bool dma_fence_match_context(struct dma_fence *fence, u64 context)
{
	struct dma_fence_array *array = to_dma_fence_array(fence);
	unsigned int i;

	if (!dma_fence_is_array(fence))
		return fence->context == context;

	for (i = 0; i < array->num_fences; i++) {
		if (array->fences[i]->context != context)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(dma_fence_match_context);
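
/*
 * Illustrative sketch, not part of the original file: a driver-side
 * helper (name and context assumed) using the check above to skip a
 * wait when all dependencies come from its own fence context.
 */
static bool example_needs_wait(struct dma_fence *fence, u64 own_context)
{
	/* Fences from our own context are already ordered against us. */
	return !dma_fence_match_context(fence, own_context);
}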
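/**
 * dma_fence_array_first - return the first fence of an array
 * @head: the fence which could be a dma_fence_array
 *
 * Returns NULL if @head is NULL or an empty dma_fence_array, @head itself
 * if it is not a dma_fence_array, and the first fence of the array
 * otherwise.
 */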
struct dma_fence *dma_fence_array_first(struct dma_fence *head)
{
	struct dma_fence_array *array;

	if (!head)
		return NULL;

	array = to_dma_fence_array(head);
	if (!array)
		return head;

	if (!array->num_fences)
		return NULL;

	return array->fences[0];
}
EXPORT_SYMBOL(dma_fence_array_first);

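/**
 * dma_fence_array_next - return the fence at the given index
 * @head: the fence which could be a dma_fence_array
 * @index: index into the array of @head
 *
 * Returns the fence at position @index if @head is a dma_fence_array with
 * at least that many fences, NULL otherwise.
 */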
struct dma_fence *dma_fence_array_next(struct dma_fence *head,
				       unsigned int index)
{
	struct dma_fence_array *array = to_dma_fence_array(head);

	if (!array || index >= array->num_fences)
		return NULL;

	return array->fences[index];
}
EXPORT_SYMBOL(dma_fence_array_next);
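
/*
 * Illustrative sketch, not part of the original file: the two helpers
 * above form an iterator pair. Open-coded, walking all fences backing
 * @head looks like this; the dma_fence_array_for_each() macro in
 * <linux/dma-fence-array.h> wraps the same pattern.
 */
static void example_log_contexts(struct dma_fence *head)
{
	struct dma_fence *fence;
	unsigned int index;

	for (index = 0, fence = dma_fence_array_first(head); fence;
	     ++index, fence = dma_fence_array_next(head, index))
		pr_info("fence %u from context %llu\n", index, fence->context);
}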
254