// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include <uapi/drm/pvr_drm.h>

#include <drm/drm_syncobj.h>
#include <drm/gpu_scheduler.h>
#include <linux/xarray.h>
#include <linux/dma-fence-unwrap.h>

#include "pvr_device.h"
#include "pvr_queue.h"
#include "pvr_sync.h"

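/**
 * pvr_check_sync_op() - Validate a drm_pvr_sync_op
 * @sync_op: Sync operation to check.
 *
 * Rejects unknown flags, unknown handle types, and non-zero timeline
 * values on binary syncobjs.
 *
 * Return:
 *  * 0 on success, or
 *  * -EINVAL if the sync operation is invalid.
 */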
static int
pvr_check_sync_op(const struct drm_pvr_sync_op *sync_op)
{
	u8 handle_type;

	if (sync_op->flags & ~DRM_PVR_SYNC_OP_FLAGS_MASK)
		return -EINVAL;

	handle_type = sync_op->flags & DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_MASK;
	if (handle_type != DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ &&
	    handle_type != DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ)
		return -EINVAL;

	if (handle_type == DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ &&
	    sync_op->value != 0)
		return -EINVAL;

	return 0;
}

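/**
 * pvr_sync_signal_free() - Release resources and free a pvr_sync_signal object
 * @sig_sync: Object to free. May be NULL, in which case this is a no-op.
 */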
static void
pvr_sync_signal_free(struct pvr_sync_signal *sig_sync)
{
	if (!sig_sync)
		return;

	drm_syncobj_put(sig_sync->syncobj);
	dma_fence_chain_free(sig_sync->chain);
	dma_fence_put(sig_sync->fence);
	kfree(sig_sync);
}

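/**
 * pvr_sync_signal_array_cleanup() - Release and free all pvr_sync_signal
 * entries in an array
 * @array: xarray of pvr_sync_signal pointers to clean up.
 *
 * The xarray itself is destroyed once all entries have been freed.
 */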
void
pvr_sync_signal_array_cleanup(struct xarray *array)
{
	struct pvr_sync_signal *sig_sync;
	unsigned long i;

	xa_for_each(array, i, sig_sync)
		pvr_sync_signal_free(sig_sync);

	xa_destroy(array);
}

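/**
 * pvr_sync_signal_array_add() - Add a pvr_sync_signal object to an array
 * @array: Array to add the entry to.
 * @file: DRM file the syncobj handle belongs to.
 * @handle: Syncobj handle.
 * @point: Timeline point; zero for binary syncobjs.
 *
 * For timeline syncobjs, a dma_fence_chain is pre-allocated here so the
 * final pvr_sync_signal_array_push_fences() step cannot fail on memory
 * allocation. The fence currently attached to the point, if any, is also
 * recorded so later wait operations on the same point can pick it up.
 *
 * Return:
 *  * A valid pvr_sync_signal object on success, or
 *  * An ERR_PTR() otherwise.
 */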
static struct pvr_sync_signal *
pvr_sync_signal_array_add(struct xarray *array, struct drm_file *file, u32 handle, u64 point)
{
	struct pvr_sync_signal *sig_sync;
	struct dma_fence *cur_fence;
	int err;
	u32 id;

	sig_sync = kzalloc(sizeof(*sig_sync), GFP_KERNEL);
	if (!sig_sync)
		return ERR_PTR(-ENOMEM);

	sig_sync->handle = handle;
	sig_sync->point = point;

	if (point > 0) {
		sig_sync->chain = dma_fence_chain_alloc();
		if (!sig_sync->chain) {
			err = -ENOMEM;
			goto err_free_sig_sync;
		}
	}

	sig_sync->syncobj = drm_syncobj_find(file, handle);
	if (!sig_sync->syncobj) {
		err = -EINVAL;
		goto err_free_sig_sync;
	}

	/* Retrieve the current fence attached to that point. It's
	 * perfectly fine to get a NULL fence here, it just means there's
	 * no fence attached to that point yet.
	 */
	if (!drm_syncobj_find_fence(file, handle, point, 0, &cur_fence))
		sig_sync->fence = cur_fence;

	err = xa_alloc(array, &id, sig_sync, xa_limit_32b, GFP_KERNEL);
	if (err)
		goto err_free_sig_sync;

	return sig_sync;

err_free_sig_sync:
	pvr_sync_signal_free(sig_sync);
	return ERR_PTR(err);
}

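/**
 * pvr_sync_signal_array_search() - Search a pvr_sync_signal entry in an array
 * @array: Array to search.
 * @handle: Syncobj handle to look for.
 * @point: Timeline point to look for.
 *
 * Return:
 *  * The entry matching the (handle, point) pair, or
 *  * NULL if no such entry exists.
 */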
static struct pvr_sync_signal *
pvr_sync_signal_array_search(struct xarray *array, u32 handle, u64 point)
{
	struct pvr_sync_signal *sig_sync;
	unsigned long i;

	xa_for_each(array, i, sig_sync) {
		if (handle == sig_sync->handle && point == sig_sync->point)
			return sig_sync;
	}

	return NULL;
}

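/**
 * pvr_sync_signal_array_get() - Search, and if needed add, a pvr_sync_signal
 * entry in an array
 * @array: Array to search/add the entry in.
 * @file: DRM file the syncobj handle belongs to.
 * @handle: Syncobj handle.
 * @point: Timeline point; zero for binary syncobjs.
 *
 * Return:
 *  * The existing or newly added entry on success, or
 *  * An ERR_PTR() otherwise.
 */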
static struct pvr_sync_signal *
pvr_sync_signal_array_get(struct xarray *array, struct drm_file *file, u32 handle, u64 point)
{
	struct pvr_sync_signal *sig_sync;

	sig_sync = pvr_sync_signal_array_search(array, handle, point);
	if (sig_sync)
		return sig_sync;

	return pvr_sync_signal_array_add(array, file, handle, point);
}

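/**
 * pvr_sync_signal_array_collect_ops() - Collect all signal operations into a
 * pvr_sync_signal array
 * @array: Array to fill.
 * @file: DRM file the syncobj handles belong to.
 * @sync_op_count: Number of entries in @sync_ops.
 * @sync_ops: Sync operations to collect; wait operations are skipped here.
 *
 * An illustrative single-job submission sequence (the real call sites live
 * in the job submission code, and error handling is elided) could look like:
 *
 *	xa_init_flags(&signal_array, XA_FLAGS_ALLOC);
 *	err = pvr_sync_signal_array_collect_ops(&signal_array, file, count, ops);
 *	err = pvr_sync_add_deps_to_job(pvr_file, job, count, ops, &signal_array);
 *	err = pvr_sync_signal_array_update_fences(&signal_array, count, ops,
 *						  done_fence);
 *	pvr_sync_signal_array_push_fences(&signal_array);
 *	pvr_sync_signal_array_cleanup(&signal_array);
 *
 * Return:
 *  * 0 on success, or
 *  * A negative error code otherwise.
 */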
int
pvr_sync_signal_array_collect_ops(struct xarray *array,
				  struct drm_file *file,
				  u32 sync_op_count,
				  const struct drm_pvr_sync_op *sync_ops)
{
	for (u32 i = 0; i < sync_op_count; i++) {
		struct pvr_sync_signal *sig_sync;
		int ret;

		if (!(sync_ops[i].flags & DRM_PVR_SYNC_OP_FLAG_SIGNAL))
			continue;

		ret = pvr_check_sync_op(&sync_ops[i]);
		if (ret)
			return ret;

		sig_sync = pvr_sync_signal_array_get(array, file,
						     sync_ops[i].handle,
						     sync_ops[i].value);
		if (IS_ERR(sig_sync))
			return PTR_ERR(sig_sync);
	}

	return 0;
}

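/**
 * pvr_sync_signal_array_update_fences() - Attach a job done fence to the
 * collected signal operations
 * @array: Array of pvr_sync_signal entries to update.
 * @sync_op_count: Number of entries in @sync_ops.
 * @sync_ops: Sync operations; wait operations are skipped.
 * @done_fence: Fence that signals when the job is done.
 *
 * Every signal operation must have been collected into @array beforehand;
 * a missing entry is a driver bug and returns -EINVAL.
 *
 * Return:
 *  * 0 on success, or
 *  * -EINVAL if an entry is missing or ends up with no fence attached.
 */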
int
pvr_sync_signal_array_update_fences(struct xarray *array,
				    u32 sync_op_count,
				    const struct drm_pvr_sync_op *sync_ops,
				    struct dma_fence *done_fence)
{
	for (u32 i = 0; i < sync_op_count; i++) {
		struct dma_fence *old_fence;
		struct pvr_sync_signal *sig_sync;

		if (!(sync_ops[i].flags & DRM_PVR_SYNC_OP_FLAG_SIGNAL))
			continue;

		sig_sync = pvr_sync_signal_array_search(array, sync_ops[i].handle,
							sync_ops[i].value);
		if (WARN_ON(!sig_sync))
			return -EINVAL;

		old_fence = sig_sync->fence;
		sig_sync->fence = dma_fence_get(done_fence);
		dma_fence_put(old_fence);

		if (WARN_ON(!sig_sync->fence))
			return -EINVAL;
	}

	return 0;
}

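/**
 * pvr_sync_signal_array_push_fences() - Push the updated fences to their
 * syncobjs
 * @array: Array of pvr_sync_signal entries to push.
 *
 * Timeline syncobjs get the fence added as a new point through the
 * pre-allocated dma_fence_chain, whose ownership is transferred here;
 * binary syncobjs simply have their fence replaced.
 */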
void
pvr_sync_signal_array_push_fences(struct xarray *array)
{
	struct pvr_sync_signal *sig_sync;
	unsigned long i;

	xa_for_each(array, i, sig_sync) {
		if (sig_sync->chain) {
			drm_syncobj_add_point(sig_sync->syncobj, sig_sync->chain,
					      sig_sync->fence, sig_sync->point);
			sig_sync->chain = NULL;
		} else {
			drm_syncobj_replace_fence(sig_sync->syncobj, sig_sync->fence);
		}
	}
}

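/**
 * pvr_sync_add_dep_to_job() - Add a fence as a job dependency
 * @job: Scheduler job to add the dependency to.
 * @f: Fence to add; fence containers are unwrapped. The reference held by
 *     the caller is consumed in all cases.
 *
 * Native (UFO-backed) fences only need the backing job to be scheduled,
 * since pvr_queue_run_job() issues the FW waits, so we depend on their
 * &drm_sched_fence.scheduled part. Everything else is added as a regular
 * dependency.
 *
 * Return:
 *  * 0 on success, or
 *  * A negative error code otherwise.
 */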
static int
pvr_sync_add_dep_to_job(struct drm_sched_job *job, struct dma_fence *f)
{
	struct dma_fence_unwrap iter;
	u32 native_fence_count = 0;
	struct dma_fence *uf;
	int err = 0;

	dma_fence_unwrap_for_each(uf, &iter, f) {
		if (pvr_queue_fence_is_ufo_backed(uf))
			native_fence_count++;
	}

	/* No need to unwrap the fence if it's fully non-native. */
	if (!native_fence_count)
		return drm_sched_job_add_dependency(job, f);

	dma_fence_unwrap_for_each(uf, &iter, f) {
		/* There's no dma_fence_unwrap_stop() helper cleaning up the refs
		 * owned by dma_fence_unwrap(), so let's just iterate over all
		 * entries without doing anything when something failed.
		 */
		if (err)
			continue;

		if (pvr_queue_fence_is_ufo_backed(uf)) {
			struct drm_sched_fence *s_fence = to_drm_sched_fence(uf);

			/* If this is a native dependency, we wait for the scheduled fence,
			 * and we will let pvr_queue_run_job() issue FW waits.
			 */
			err = drm_sched_job_add_dependency(job,
							   dma_fence_get(&s_fence->scheduled));
		} else {
			err = drm_sched_job_add_dependency(job, dma_fence_get(uf));
		}
	}

	dma_fence_put(f);
	return err;
}

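/**
 * pvr_sync_add_deps_to_job() - Add all wait operations as job dependencies
 * @pvr_file: PowerVR file the syncobj handles belong to.
 * @job: Scheduler job to add the dependencies to.
 * @sync_op_count: Number of entries in @sync_ops.
 * @sync_ops: Sync operations; signal operations are skipped here.
 * @signal_array: Array of pvr_sync_signal entries collected from the same
 *	submission.
 *
 * Wait operations are first looked up in @signal_array, so a wait on a
 * point that's signaled by the same submission resolves to the fence most
 * recently attached to that entry (e.g. an earlier job's done fence) rather
 * than to whatever the syncobj currently holds.
 *
 * Return:
 *  * 0 on success, or
 *  * A negative error code otherwise.
 */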
int
pvr_sync_add_deps_to_job(struct pvr_file *pvr_file, struct drm_sched_job *job,
			 u32 sync_op_count,
			 const struct drm_pvr_sync_op *sync_ops,
			 struct xarray *signal_array)
{
	int err = 0;

	if (!sync_op_count)
		return 0;

	for (u32 i = 0; i < sync_op_count; i++) {
		struct pvr_sync_signal *sig_sync;
		struct dma_fence *fence;

		if (sync_ops[i].flags & DRM_PVR_SYNC_OP_FLAG_SIGNAL)
			continue;

		err = pvr_check_sync_op(&sync_ops[i]);
		if (err)
			return err;

		sig_sync = pvr_sync_signal_array_search(signal_array, sync_ops[i].handle,
							sync_ops[i].value);
		if (sig_sync) {
			if (WARN_ON(!sig_sync->fence))
				return -EINVAL;

			fence = dma_fence_get(sig_sync->fence);
		} else {
			err = drm_syncobj_find_fence(from_pvr_file(pvr_file), sync_ops[i].handle,
						     sync_ops[i].value, 0, &fence);
			if (err)
				return err;
		}

		err = pvr_sync_add_dep_to_job(job, fence);
		if (err)
			return err;
	}

	return 0;
}