/*
 * fence-chain: chain fences together in a timeline
 *
 * Copyright (C) 2018 Advanced Micro Devices, Inc.
 * Authors:
 *	Christian König <christian.koenig@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/dma-fence-chain.h>

static bool dma_fence_chain_enable_signaling(struct dma_fence *fence);

/**
 * dma_fence_chain_get_prev - use RCU to get a reference to the previous fence
 * @chain: chain node to get the previous node from
 *
 * Use dma_fence_get_rcu_safe() to get a reference to the previous fence of
 * the chain node.
 */
static struct dma_fence *dma_fence_chain_get_prev(struct dma_fence_chain *chain)
{
	struct dma_fence *prev;

	rcu_read_lock();
	prev = dma_fence_get_rcu_safe(&chain->prev);
	rcu_read_unlock();
	return prev;
}

/**
 * dma_fence_chain_walk - chain walking function
 * @fence: current chain node
 *
 * Walk the chain to the next node. Returns the next fence or NULL if we are at
 * the end of the chain. Garbage collects chain nodes which are already
 * signaled.
 */
struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence)
{
	struct dma_fence_chain *chain, *prev_chain;
	struct dma_fence *prev, *replacement, *tmp;

	chain = to_dma_fence_chain(fence);
	if (!chain) {
		dma_fence_put(fence);
		return NULL;
	}

	while ((prev = dma_fence_chain_get_prev(chain))) {
		prev_chain = to_dma_fence_chain(prev);
		if (prev_chain) {
			if (!dma_fence_is_signaled(prev_chain->fence))
				break;

			replacement = dma_fence_chain_get_prev(prev_chain);
		} else {
			if (!dma_fence_is_signaled(prev))
				break;

			replacement = NULL;
		}

		/*
		 * Unlink the signaled node: if the cmpxchg succeeds, the
		 * chain's reference to prev becomes ours (tmp) and our
		 * reference to replacement is transferred to the chain.
		 * Otherwise somebody else already exchanged ->prev, so drop
		 * replacement instead.
		 */
		tmp = cmpxchg((void **)&chain->prev, (void *)prev, (void *)replacement);
		if (tmp == prev)
			dma_fence_put(tmp);
		else
			dma_fence_put(replacement);
		dma_fence_put(prev);
	}

	dma_fence_put(fence);
	return prev;
}
EXPORT_SYMBOL(dma_fence_chain_walk);
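
/*
 * Illustrative sketch (not part of the original file): how a caller walks a
 * chain with the dma_fence_chain_for_each() helper from
 * <linux/dma-fence-chain.h>, which is built on dma_fence_chain_walk(). The
 * iterator holds its own reference to the current node, so the loop body only
 * has to drop it when bailing out early. The function name is hypothetical.
 */
static int __maybe_unused example_wait_whole_chain(struct dma_fence *head)
{
	struct dma_fence *iter;
	long ret;

	dma_fence_chain_for_each(iter, head) {
		struct dma_fence_chain *chain = to_dma_fence_chain(iter);
		/* Chain nodes wrap the actual fence in chain->fence. */
		struct dma_fence *f = chain ? chain->fence : iter;

		ret = dma_fence_wait(f, true);
		if (ret < 0) {
			dma_fence_put(iter);
			return ret;
		}
	}
	return 0;
}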

/**
 * dma_fence_chain_find_seqno - find fence chain node by seqno
 * @pfence: pointer to the chain node where to start
 * @seqno: the sequence number to search for
 *
 * Advance the fence pointer to the chain node which will signal this sequence
 * number. If no sequence number is provided then this is a no-op.
 *
 * Returns -EINVAL if the fence is not a chain node or the sequence number has
 * not yet advanced far enough.
 */
int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno)
{
	struct dma_fence_chain *chain;

	if (!seqno)
		return 0;

	chain = to_dma_fence_chain(*pfence);
	if (!chain || chain->base.seqno < seqno)
		return -EINVAL;

	dma_fence_chain_for_each(*pfence, &chain->base) {
		if ((*pfence)->context != chain->base.context ||
		    to_dma_fence_chain(*pfence)->prev_seqno < seqno)
			break;
	}
	dma_fence_put(&chain->base);

	return 0;
}
EXPORT_SYMBOL(dma_fence_chain_find_seqno);
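
/*
 * Illustrative sketch (not part of the original file): resolving a timeline
 * point with dma_fence_chain_find_seqno(), roughly what a timeline semaphore
 * user would do. On success the old reference is replaced by a reference to
 * the node covering @point; a NULL result means the point has already
 * signaled. The function name is hypothetical.
 */
static int __maybe_unused example_wait_point(struct dma_fence *head, u64 point)
{
	struct dma_fence *fence = dma_fence_get(head);
	int err;

	err = dma_fence_chain_find_seqno(&fence, point);
	if (err) {
		/* On error our reference is untouched, drop it ourselves. */
		dma_fence_put(fence);
		return err;
	}

	if (!fence)
		return 0;

	err = dma_fence_wait(fence, true);
	dma_fence_put(fence);
	return err;
}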

static const char *dma_fence_chain_get_driver_name(struct dma_fence *fence)
{
	return "dma_fence_chain";
}

static const char *dma_fence_chain_get_timeline_name(struct dma_fence *fence)
{
	return "unbound";
}

static void dma_fence_chain_irq_work(struct irq_work *work)
{
	struct dma_fence_chain *chain;

	chain = container_of(work, typeof(*chain), work);

	/* Try to rearm the callback */
	if (!dma_fence_chain_enable_signaling(&chain->base))
		/* Ok, we are done. No more unsignaled fences left */
		dma_fence_signal(&chain->base);
	dma_fence_put(&chain->base);
}

static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct dma_fence_chain *chain;

	chain = container_of(cb, typeof(*chain), cb);
	irq_work_queue(&chain->work);
	dma_fence_put(f);
}

static bool dma_fence_chain_enable_signaling(struct dma_fence *fence)
{
	struct dma_fence_chain *head = to_dma_fence_chain(fence);

	/* Keep a reference on the head while the callback is armed; the
	 * irq_work drops it.
	 */
	dma_fence_get(&head->base);
	dma_fence_chain_for_each(fence, &head->base) {
		struct dma_fence_chain *chain = to_dma_fence_chain(fence);
		struct dma_fence *f = chain ? chain->fence : fence;

		/* Install the callback on the first still unsignaled fence. */
		dma_fence_get(f);
		if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) {
			dma_fence_put(fence);
			return true;
		}
		dma_fence_put(f);
	}
	dma_fence_put(&head->base);
	return false;
}

static bool dma_fence_chain_signaled(struct dma_fence *fence)
{
	dma_fence_chain_for_each(fence, fence) {
		struct dma_fence_chain *chain = to_dma_fence_chain(fence);
		struct dma_fence *f = chain ? chain->fence : fence;

		if (!dma_fence_is_signaled(f)) {
			dma_fence_put(fence);
			return false;
		}
	}

	return true;
}

static void dma_fence_chain_release(struct dma_fence *fence)
{
	struct dma_fence_chain *chain = to_dma_fence_chain(fence);

	dma_fence_put(rcu_dereference_protected(chain->prev, true));
	dma_fence_put(chain->fence);
	dma_fence_free(fence);
}

const struct dma_fence_ops dma_fence_chain_ops = {
	.use_64bit_seqno = true,
	.get_driver_name = dma_fence_chain_get_driver_name,
	.get_timeline_name = dma_fence_chain_get_timeline_name,
	.enable_signaling = dma_fence_chain_enable_signaling,
	.signaled = dma_fence_chain_signaled,
	.release = dma_fence_chain_release,
};
EXPORT_SYMBOL(dma_fence_chain_ops);
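
/*
 * The ops table above is exported because it doubles as the type marker for
 * chain nodes: to_dma_fence_chain() in <linux/dma-fence-chain.h> identifies
 * a chain node by comparing fence->ops against it, roughly like this sketch
 * (the function name here is hypothetical; see the header for the real one):
 */
static inline struct dma_fence_chain *example_to_chain(struct dma_fence *fence)
{
	if (!fence || fence->ops != &dma_fence_chain_ops)
		return NULL;

	return container_of(fence, struct dma_fence_chain, base);
}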

/**
 * dma_fence_chain_init - initialize a fence chain
 * @chain: the chain node to initialize
 * @prev: the previous fence
 * @fence: the current fence
 * @seqno: the sequence number to use for the fence chain
 *
 * Initialize a new chain node and either start a new chain or add the node to
 * the existing chain of the previous fence.
 */
void dma_fence_chain_init(struct dma_fence_chain *chain,
			  struct dma_fence *prev,
			  struct dma_fence *fence,
			  uint64_t seqno)
{
	struct dma_fence_chain *prev_chain = to_dma_fence_chain(prev);
	uint64_t context;

	spin_lock_init(&chain->lock);
	rcu_assign_pointer(chain->prev, prev);
	chain->fence = fence;
	chain->prev_seqno = 0;
	init_irq_work(&chain->work, dma_fence_chain_irq_work);

	/* Try to reuse the context of the previous chain node. */
	if (prev_chain && __dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
		context = prev->context;
		chain->prev_seqno = prev->seqno;
	} else {
		context = dma_fence_context_alloc(1);
		/* Make sure that we always have a valid sequence number. */
		if (prev_chain)
			seqno = max(prev->seqno, seqno);
	}

	dma_fence_init(&chain->base, &dma_fence_chain_ops,
		       &chain->lock, context, seqno);
}
EXPORT_SYMBOL(dma_fence_chain_init);
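
/*
 * Illustrative sketch (not part of the original file): extending a timeline
 * by one point. The caller allocates a chain node and links the new fence
 * behind the current head; dma_fence_chain_init() takes over both references
 * handed to it (they are released in dma_fence_chain_release()), which is why
 * @fence gets an extra get here. Assumes <linux/slab.h> for kmalloc(); the
 * function name is hypothetical.
 */
static int __maybe_unused example_add_point(struct dma_fence **phead,
					    struct dma_fence *fence,
					    u64 point)
{
	struct dma_fence_chain *chain;

	chain = kmalloc(sizeof(*chain), GFP_KERNEL);
	if (!chain)
		return -ENOMEM;

	/* The reference to *phead moves into chain->prev. */
	dma_fence_chain_init(chain, *phead, dma_fence_get(fence), point);
	*phead = &chain->base;
	return 0;
}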
243