xref: /linux/drivers/dma-buf/sw_sync.c (revision 8f8d5745bb520c76b81abef4a2cb3023d0313bfd)
/*
 * Sync File validation framework
 *
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/sync_file.h>

#include "sync_debug.h"

#define CREATE_TRACE_POINTS
#include "sync_trace.h"

/*
 * SW SYNC validation framework
 *
 * A sync object driver that uses a 32-bit counter to coordinate
 * synchronization.  Useful when there is no hardware primitive backing
 * the synchronization.
 *
 * To start the framework just open:
 *
 * <debugfs>/sync/sw_sync
 *
 * That will create a sync timeline; all fences created through this file
 * descriptor will belong to that timeline.
 *
 * The 'sw_sync' file can be opened many times to create different
 * timelines.
 *
 * Fences can be created with the SW_SYNC_IOC_CREATE_FENCE ioctl, which
 * takes a struct sw_sync_create_fence_data as parameter.
 *
 * To increment the timeline counter, the SW_SYNC_IOC_INC ioctl should be
 * used with the increment as a u32. This updates the last signaled value
 * of the timeline and signals every fence with a seqno smaller than or
 * equal to it. (An illustrative userspace sketch follows the ioctl
 * definitions below.)
 *
 * struct sw_sync_create_fence_data
 * @value:	the seqno to initialise the fence with
 * @name:	the name of the new sync point
 * @fence:	returns the fd of the new sync_file with the created fence
 */
struct sw_sync_create_fence_data {
	__u32	value;
	char	name[32];
	__s32	fence; /* fd of new fence */
};

#define SW_SYNC_IOC_MAGIC	'W'

#define SW_SYNC_IOC_CREATE_FENCE	_IOWR(SW_SYNC_IOC_MAGIC, 0,\
		struct sw_sync_create_fence_data)

#define SW_SYNC_IOC_INC			_IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
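
/*
 * Illustrative userspace sketch (not part of the driver): the debugfs
 * mount point below is only the usual default, and since these
 * definitions are not exported through a uapi header, test programs
 * typically carry their own copy of the struct and ioctl numbers.
 *
 *	int tl = open("/sys/kernel/debug/sync/sw_sync", O_RDWR);
 *	struct sw_sync_create_fence_data data = { .value = 5, .name = "pt5" };
 *	__u32 inc = 5;
 *
 *	ioctl(tl, SW_SYNC_IOC_CREATE_FENCE, &data);
 *	// data.fence is now a sync_file fd for a fence with seqno 5
 *	ioctl(tl, SW_SYNC_IOC_INC, &inc);
 *	// the timeline advances from 0 to 5, signaling the fence above
 *	close(data.fence);
 *	close(tl);
 *	// closing the timeline errors out (-ENOENT) any still-pending fences
 */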

static const struct dma_fence_ops timeline_fence_ops;

static inline struct sync_pt *dma_fence_to_sync_pt(struct dma_fence *fence)
{
	if (fence->ops != &timeline_fence_ops)
		return NULL;
	return container_of(fence, struct sync_pt, base);
}

/**
 * sync_timeline_create() - creates a sync object
 * @name:	sync_timeline name
 *
 * Creates a new sync_timeline. Returns the sync_timeline object or NULL in
 * case of error.
 */
static struct sync_timeline *sync_timeline_create(const char *name)
{
	struct sync_timeline *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	kref_init(&obj->kref);
	obj->context = dma_fence_context_alloc(1);
	strlcpy(obj->name, name, sizeof(obj->name));

	obj->pt_tree = RB_ROOT;
	INIT_LIST_HEAD(&obj->pt_list);
	spin_lock_init(&obj->lock);

	sync_timeline_debug_add(obj);

	return obj;
}

static void sync_timeline_free(struct kref *kref)
{
	struct sync_timeline *obj =
		container_of(kref, struct sync_timeline, kref);

	sync_timeline_debug_remove(obj);

	kfree(obj);
}

static void sync_timeline_get(struct sync_timeline *obj)
{
	kref_get(&obj->kref);
}

static void sync_timeline_put(struct sync_timeline *obj)
{
	kref_put(&obj->kref, sync_timeline_free);
}

static const char *timeline_fence_get_driver_name(struct dma_fence *fence)
{
	return "sw_sync";
}

static const char *timeline_fence_get_timeline_name(struct dma_fence *fence)
{
	struct sync_timeline *parent = dma_fence_parent(fence);

	return parent->name;
}

static void timeline_fence_release(struct dma_fence *fence)
{
	struct sync_pt *pt = dma_fence_to_sync_pt(fence);
	struct sync_timeline *parent = dma_fence_parent(fence);

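	/*
	 * If this sync_pt is still being tracked on its timeline, drop it
	 * from the pt_list and pt_tree now. The unlocked check is only an
	 * optimisation; re-check under the lock in case
	 * sync_timeline_signal() removed it concurrently.
	 */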
	if (!list_empty(&pt->link)) {
		unsigned long flags;

		spin_lock_irqsave(fence->lock, flags);
		if (!list_empty(&pt->link)) {
			list_del(&pt->link);
			rb_erase(&pt->node, &parent->pt_tree);
		}
		spin_unlock_irqrestore(fence->lock, flags);
	}

	sync_timeline_put(parent);
	dma_fence_free(fence);
}

static bool timeline_fence_signaled(struct dma_fence *fence)
{
	struct sync_timeline *parent = dma_fence_parent(fence);

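	/* Signaled once the timeline counter has reached this fence's seqno. */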
	return !__dma_fence_is_later(fence->seqno, parent->value);
}

static bool timeline_fence_enable_signaling(struct dma_fence *fence)
{
	return true;
}

static void timeline_fence_value_str(struct dma_fence *fence,
				    char *str, int size)
{
	snprintf(str, size, "%lld", fence->seqno);
}

static void timeline_fence_timeline_value_str(struct dma_fence *fence,
					     char *str, int size)
{
	struct sync_timeline *parent = dma_fence_parent(fence);

	snprintf(str, size, "%d", parent->value);
}

static const struct dma_fence_ops timeline_fence_ops = {
	.get_driver_name = timeline_fence_get_driver_name,
	.get_timeline_name = timeline_fence_get_timeline_name,
	.enable_signaling = timeline_fence_enable_signaling,
	.signaled = timeline_fence_signaled,
	.release = timeline_fence_release,
	.fence_value_str = timeline_fence_value_str,
	.timeline_value_str = timeline_fence_timeline_value_str,
};

/**
 * sync_timeline_signal() - signal a status change on a sync_timeline
 * @obj:	sync_timeline to signal
 * @inc:	num to increment on timeline->value
 *
 * A sync implementation should call this any time one of its fences
 * has signaled or has an error condition.
 */
static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
{
	struct sync_pt *pt, *next;

	trace_sync_timeline(obj);

	spin_lock_irq(&obj->lock);

	obj->value += inc;

	list_for_each_entry_safe(pt, next, &obj->pt_list, link) {
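		/*
		 * pt_list is kept sorted by seqno, so we can stop at the
		 * first point the new value has not reached yet.
		 */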
		if (!timeline_fence_signaled(&pt->base))
			break;

		list_del_init(&pt->link);
		rb_erase(&pt->node, &obj->pt_tree);

		/*
		 * A signal callback may release the last reference to this
		 * fence, causing it to be freed. That operation has to be
		 * last to avoid a use after free inside this loop, and must
		 * be after we remove the fence from the timeline in order to
		 * prevent deadlocking on timeline->lock inside
		 * timeline_fence_release().
		 */
		dma_fence_signal_locked(&pt->base);
	}

	spin_unlock_irq(&obj->lock);
}

/**
 * sync_pt_create() - creates a sync pt
 * @obj:	parent sync_timeline
 * @value:	value of the fence
 *
 * Creates a new sync_pt (fence) as a child of @obj. Returns the sync_pt
 * object or NULL in case of error.
 */
static struct sync_pt *sync_pt_create(struct sync_timeline *obj,
				      unsigned int value)
{
	struct sync_pt *pt;

	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
	if (!pt)
		return NULL;

	sync_timeline_get(obj);
	dma_fence_init(&pt->base, &timeline_fence_ops, &obj->lock,
		       obj->context, value);
	INIT_LIST_HEAD(&pt->link);

	spin_lock_irq(&obj->lock);
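	/*
	 * Only unsignaled points need to be tracked: insert them into the
	 * seqno-ordered rbtree and into pt_list, which is kept sorted by
	 * linking the new point just before its rbtree successor.
	 */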
	if (!dma_fence_is_signaled_locked(&pt->base)) {
		struct rb_node **p = &obj->pt_tree.rb_node;
		struct rb_node *parent = NULL;

		while (*p) {
			struct sync_pt *other;
			int cmp;

			parent = *p;
			other = rb_entry(parent, typeof(*pt), node);
			cmp = value - other->base.seqno;
			if (cmp > 0) {
				p = &parent->rb_right;
			} else if (cmp < 0) {
				p = &parent->rb_left;
			} else {
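				/*
				 * A point with the same seqno already
				 * exists; reuse it if we can still grab a
				 * reference, otherwise it is being released
				 * and we keep descending to insert a fresh
				 * one next to it.
				 */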
				if (dma_fence_get_rcu(&other->base)) {
					dma_fence_put(&pt->base);
					pt = other;
					goto unlock;
				}
				p = &parent->rb_left;
			}
		}
		rb_link_node(&pt->node, parent, p);
		rb_insert_color(&pt->node, &obj->pt_tree);

		parent = rb_next(&pt->node);
		list_add_tail(&pt->link,
			      parent ? &rb_entry(parent, typeof(*pt), node)->link : &obj->pt_list);
	}
unlock:
	spin_unlock_irq(&obj->lock);

	return pt;
}

/*
 * *WARNING*
 *
 * Improper use of this can result in deadlocking kernel drivers from
 * userspace.
 */

/* opening sw_sync creates a new sync obj */
static int sw_sync_debugfs_open(struct inode *inode, struct file *file)
{
	struct sync_timeline *obj;
	char task_comm[TASK_COMM_LEN];

	get_task_comm(task_comm, current);

	obj = sync_timeline_create(task_comm);
	if (!obj)
		return -ENOMEM;

	file->private_data = obj;

	return 0;
}

static int sw_sync_debugfs_release(struct inode *inode, struct file *file)
{
	struct sync_timeline *obj = file->private_data;
	struct sync_pt *pt, *next;

	spin_lock_irq(&obj->lock);

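	/*
	 * The timeline is going away; mark every outstanding fence with an
	 * error and signal it so that waiters are not left blocked forever.
	 */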
	list_for_each_entry_safe(pt, next, &obj->pt_list, link) {
		dma_fence_set_error(&pt->base, -ENOENT);
		dma_fence_signal_locked(&pt->base);
	}

	spin_unlock_irq(&obj->lock);

	sync_timeline_put(obj);
	return 0;
}

static long sw_sync_ioctl_create_fence(struct sync_timeline *obj,
				       unsigned long arg)
{
	int fd = get_unused_fd_flags(O_CLOEXEC);
	int err;
	struct sync_pt *pt;
	struct sync_file *sync_file;
	struct sw_sync_create_fence_data data;

	if (fd < 0)
		return fd;

	if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
		err = -EFAULT;
		goto err;
	}

	pt = sync_pt_create(obj, data.value);
	if (!pt) {
		err = -ENOMEM;
		goto err;
	}

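	/*
	 * On success sync_file_create() takes its own reference on the
	 * fence, so the creation reference can be dropped in either case.
	 */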
	sync_file = sync_file_create(&pt->base);
	dma_fence_put(&pt->base);
	if (!sync_file) {
		err = -ENOMEM;
		goto err;
	}

	data.fence = fd;
	if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
		fput(sync_file->file);
		err = -EFAULT;
		goto err;
	}

	fd_install(fd, sync_file->file);

	return 0;

err:
	put_unused_fd(fd);
	return err;
}

static long sw_sync_ioctl_inc(struct sync_timeline *obj, unsigned long arg)
{
	u32 value;

	if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
		return -EFAULT;

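	/*
	 * Timeline/seqno comparisons use a signed 32-bit difference for
	 * wraparound handling, so advance in chunks of at most INT_MAX to
	 * keep every intermediate step within the comparison window.
	 */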
	while (value > INT_MAX) {
		sync_timeline_signal(obj, INT_MAX);
		value -= INT_MAX;
	}

	sync_timeline_signal(obj, value);

	return 0;
}

static long sw_sync_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct sync_timeline *obj = file->private_data;

	switch (cmd) {
	case SW_SYNC_IOC_CREATE_FENCE:
		return sw_sync_ioctl_create_fence(obj, arg);

	case SW_SYNC_IOC_INC:
		return sw_sync_ioctl_inc(obj, arg);

	default:
		return -ENOTTY;
	}
}

const struct file_operations sw_sync_debugfs_fops = {
	.open           = sw_sync_debugfs_open,
	.release        = sw_sync_debugfs_release,
	.unlocked_ioctl = sw_sync_ioctl,
	.compat_ioctl	= sw_sync_ioctl,
};