xref: /linux/drivers/misc/ntsync.c (revision 6d2478a103a8238c5382f8a318735aa75d49803a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * ntsync.c - Kernel driver for NT synchronization primitives
4  *
5  * Copyright (C) 2024 Elizabeth Figura <zfigura@codeweavers.com>
6  */
7 
8 #include <linux/anon_inodes.h>
9 #include <linux/atomic.h>
10 #include <linux/file.h>
11 #include <linux/fs.h>
12 #include <linux/hrtimer.h>
13 #include <linux/ktime.h>
14 #include <linux/miscdevice.h>
15 #include <linux/module.h>
16 #include <linux/mutex.h>
17 #include <linux/overflow.h>
18 #include <linux/sched.h>
19 #include <linux/sched/signal.h>
20 #include <linux/slab.h>
21 #include <linux/spinlock.h>
22 #include <uapi/linux/ntsync.h>
23 
24 #define NTSYNC_NAME	"ntsync"
25 
/* The three NT primitive types an ntsync object can represent. */
enum ntsync_type {
	NTSYNC_TYPE_SEM,
	NTSYNC_TYPE_MUTEX,
	NTSYNC_TYPE_EVENT,
};
31 
32 /*
33  * Individual synchronization primitives are represented by
34  * struct ntsync_obj, and each primitive is backed by a file.
35  *
36  * The whole namespace is represented by a struct ntsync_device also
37  * backed by a file.
38  *
39  * Both rely on struct file for reference counting. Individual
40  * ntsync_obj objects take a reference to the device when created.
41  * Wait operations take a reference to each object being waited on for
42  * the duration of the wait.
43  */
44 
struct ntsync_obj {
	spinlock_t lock;
	/*
	 * Nonzero while this object is "locked" by a multi-object operation
	 * holding dev->wait_all_lock; see the locking comment above
	 * dev_lock_obj().
	 */
	int dev_locked;

	enum ntsync_type type;

	/* The anonymous-inode file backing this object. */
	struct file *file;
	/* Owning device; a reference to dev->file is held for our lifetime. */
	struct ntsync_device *dev;

	/* The following fields are protected by the object lock. */
	union {
		struct {
			__u32 count;
			__u32 max;
		} sem;
		struct {
			__u32 count;
			pid_t owner;
			bool ownerdead;
		} mutex;
		struct {
			bool manual;
			bool signaled;
		} event;
	} u;

	/*
	 * any_waiters is protected by the object lock, but all_waiters is
	 * protected by the device wait_all_lock.
	 */
	struct list_head any_waiters;
	struct list_head all_waiters;

	/*
	 * Hint describing how many tasks are queued on this object in a
	 * wait-all operation.
	 *
	 * Any time we do a wake, we may need to wake "all" waiters as well as
	 * "any" waiters. In order to atomically wake "all" waiters, we must
	 * lock all of the objects, and that means grabbing the wait_all_lock
	 * below (and, due to lock ordering rules, before locking this object).
	 * However, wait-all is a rare operation, and grabbing the wait-all
	 * lock for every wake would create unnecessary contention.
	 * Therefore we first check whether all_hint is zero, and, if it is,
	 * we skip trying to wake "all" waiters.
	 *
	 * Since wait requests must originate from user-space threads, we're
	 * limited here by PID_MAX_LIMIT, so there's no risk of overflow.
	 */
	atomic_t all_hint;
};
96 
/* One (waiter, object) link; queued on the object's waiter list. */
struct ntsync_q_entry {
	struct list_head node;
	struct ntsync_q *q;
	struct ntsync_obj *obj;
	/* Position of this object in the wait request's array. */
	__u32 index;
};
103 
/* One in-flight wait operation (NTSYNC_IOC_WAIT_ANY/ALL). */
struct ntsync_q {
	struct task_struct *task;
	__u32 owner;

	/*
	 * Protected via atomic_try_cmpxchg(). Only the thread that wins the
	 * compare-and-swap may actually change object states and wake this
	 * task.
	 */
	atomic_t signaled;

	/* True for wait-all, false for wait-any. */
	bool all;
	/* Set when a mutex was acquired whose previous owner died. */
	bool ownerdead;
	/* Number of objects, excluding any trailing alert event entry. */
	__u32 count;
	struct ntsync_q_entry entries[];
};
120 
struct ntsync_device {
	/*
	 * Wait-all operations must atomically grab all objects, and be totally
	 * ordered with respect to each other and wait-any operations.
	 * If one thread is trying to acquire several objects, another thread
	 * cannot touch the object at the same time.
	 *
	 * This device-wide lock is used to serialize wait-for-all
	 * operations, and operations on an object that is involved in a
	 * wait-for-all.
	 */
	struct mutex wait_all_lock;

	/* The file backing this device; objects hold a reference to it. */
	struct file *file;
};
136 
137 /*
138  * Single objects are locked using obj->lock.
139  *
140  * Multiple objects are 'locked' while holding dev->wait_all_lock.
141  * In this case however, individual objects are not locked by holding
142  * obj->lock, but by setting obj->dev_locked.
143  *
144  * This means that in order to lock a single object, the sequence is slightly
145  * more complicated than usual. Specifically it needs to check obj->dev_locked
146  * after acquiring obj->lock, if set, it needs to drop the lock and acquire
147  * dev->wait_all_lock in order to serialize against the multi-object operation.
148  */
149 
/*
 * "Lock" @obj as part of a multi-object operation. The caller must hold
 * dev->wait_all_lock; marking the object dev-locked makes single-object
 * lockers (obj_lock()) back off and serialize on wait_all_lock.
 */
static void dev_lock_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
{
	lockdep_assert_held(&dev->wait_all_lock);
	lockdep_assert(obj->dev == dev);
	spin_lock(&obj->lock);
	/*
	 * By setting obj->dev_locked inside obj->lock, it is ensured that
	 * anyone holding obj->lock must see the value.
	 */
	obj->dev_locked = 1;
	spin_unlock(&obj->lock);
}
162 
/* Undo dev_lock_obj(); still requires dev->wait_all_lock to be held. */
static void dev_unlock_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
{
	lockdep_assert_held(&dev->wait_all_lock);
	lockdep_assert(obj->dev == dev);
	spin_lock(&obj->lock);
	obj->dev_locked = 0;
	spin_unlock(&obj->lock);
}
171 
/*
 * Lock a single object, serializing against any multi-object operation.
 * If the object is currently dev-locked we drop obj->lock and wait for
 * the multi-object section to finish by acquiring dev->wait_all_lock,
 * then retry.
 */
static void obj_lock(struct ntsync_obj *obj)
{
	struct ntsync_device *dev = obj->dev;

	for (;;) {
		spin_lock(&obj->lock);
		if (likely(!obj->dev_locked))
			break;

		/* Dev-locked: back off and wait on wait_all_lock instead. */
		spin_unlock(&obj->lock);
		mutex_lock(&dev->wait_all_lock);
		spin_lock(&obj->lock);
		/*
		 * obj->dev_locked should be set and released under the same
		 * wait_all_lock section, since we now own this lock, it should
		 * be clear.
		 */
		lockdep_assert(!obj->dev_locked);
		spin_unlock(&obj->lock);
		mutex_unlock(&dev->wait_all_lock);
	}
}
194 
/* Counterpart to obj_lock() for the common single-object case. */
static void obj_unlock(struct ntsync_obj *obj)
{
	spin_unlock(&obj->lock);
}
199 
/*
 * Lock @obj prior to a state change. Returns true when the object has
 * wait-all waiters queued (all_hint nonzero); in that case the object is
 * re-locked in "dev-locked" mode under dev->wait_all_lock so the caller
 * can also run try_wake_all_obj(). The return value must be passed back
 * to ntsync_unlock_obj().
 */
static bool ntsync_lock_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
{
	bool all;

	obj_lock(obj);
	all = atomic_read(&obj->all_hint);
	if (unlikely(all)) {
		/* Switch to the (rare, slower) multi-object locking mode. */
		obj_unlock(obj);
		mutex_lock(&dev->wait_all_lock);
		dev_lock_obj(dev, obj);
	}

	return all;
}
214 
215 static void ntsync_unlock_obj(struct ntsync_device *dev, struct ntsync_obj *obj, bool all)
216 {
217 	if (all) {
218 		dev_unlock_obj(dev, obj);
219 		mutex_unlock(&dev->wait_all_lock);
220 	} else {
221 		obj_unlock(obj);
222 	}
223 }
224 
/*
 * Assert that @obj is locked by either means: its own spinlock, or
 * dev-locked under the device's wait_all_lock.
 */
#define ntsync_assert_held(obj) \
	lockdep_assert((lockdep_is_held(&(obj)->lock) != LOCK_STATE_NOT_HELD) || \
		       ((lockdep_is_held(&(obj)->dev->wait_all_lock) != LOCK_STATE_NOT_HELD) && \
			(obj)->dev_locked))
229 
230 static bool is_signaled(struct ntsync_obj *obj, __u32 owner)
231 {
232 	ntsync_assert_held(obj);
233 
234 	switch (obj->type) {
235 	case NTSYNC_TYPE_SEM:
236 		return !!obj->u.sem.count;
237 	case NTSYNC_TYPE_MUTEX:
238 		if (obj->u.mutex.owner && obj->u.mutex.owner != owner)
239 			return false;
240 		return obj->u.mutex.count < UINT_MAX;
241 	case NTSYNC_TYPE_EVENT:
242 		return obj->u.event.signaled;
243 	}
244 
245 	WARN(1, "bad object type %#x\n", obj->type);
246 	return false;
247 }
248 
249 /*
250  * "locked_obj" is an optional pointer to an object which is already locked and
251  * should not be locked again. This is necessary so that changing an object's
252  * state and waking it can be a single atomic operation.
253  */
254 static void try_wake_all(struct ntsync_device *dev, struct ntsync_q *q,
255 			 struct ntsync_obj *locked_obj)
256 {
257 	__u32 count = q->count;
258 	bool can_wake = true;
259 	int signaled = -1;
260 	__u32 i;
261 
262 	lockdep_assert_held(&dev->wait_all_lock);
263 	if (locked_obj)
264 		lockdep_assert(locked_obj->dev_locked);
265 
266 	for (i = 0; i < count; i++) {
267 		if (q->entries[i].obj != locked_obj)
268 			dev_lock_obj(dev, q->entries[i].obj);
269 	}
270 
271 	for (i = 0; i < count; i++) {
272 		if (!is_signaled(q->entries[i].obj, q->owner)) {
273 			can_wake = false;
274 			break;
275 		}
276 	}
277 
278 	if (can_wake && atomic_try_cmpxchg(&q->signaled, &signaled, 0)) {
279 		for (i = 0; i < count; i++) {
280 			struct ntsync_obj *obj = q->entries[i].obj;
281 
282 			switch (obj->type) {
283 			case NTSYNC_TYPE_SEM:
284 				obj->u.sem.count--;
285 				break;
286 			case NTSYNC_TYPE_MUTEX:
287 				if (obj->u.mutex.ownerdead)
288 					q->ownerdead = true;
289 				obj->u.mutex.ownerdead = false;
290 				obj->u.mutex.count++;
291 				obj->u.mutex.owner = q->owner;
292 				break;
293 			case NTSYNC_TYPE_EVENT:
294 				if (!obj->u.event.manual)
295 					obj->u.event.signaled = false;
296 				break;
297 			}
298 		}
299 		wake_up_process(q->task);
300 	}
301 
302 	for (i = 0; i < count; i++) {
303 		if (q->entries[i].obj != locked_obj)
304 			dev_unlock_obj(dev, q->entries[i].obj);
305 	}
306 }
307 
/*
 * Attempt to satisfy every wait-all request that includes @obj.
 * @obj must already be dev-locked under dev->wait_all_lock.
 */
static void try_wake_all_obj(struct ntsync_device *dev, struct ntsync_obj *obj)
{
	struct ntsync_q_entry *entry;

	lockdep_assert_held(&dev->wait_all_lock);
	lockdep_assert(obj->dev_locked);

	list_for_each_entry(entry, &obj->all_waiters, node)
		try_wake_all(dev, entry->q, obj);
}
318 
/*
 * Wake wait-any waiters of @sem in FIFO order, consuming one count per
 * woken task, until the semaphore count reaches zero.
 */
static void try_wake_any_sem(struct ntsync_obj *sem)
{
	struct ntsync_q_entry *entry;

	ntsync_assert_held(sem);
	lockdep_assert(sem->type == NTSYNC_TYPE_SEM);

	list_for_each_entry(entry, &sem->any_waiters, node) {
		struct ntsync_q *q = entry->q;
		int signaled = -1;

		if (!sem->u.sem.count)
			break;

		/* Only decrement if we win the race to signal this waiter. */
		if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) {
			sem->u.sem.count--;
			wake_up_process(q->task);
		}
	}
}
339 
/*
 * Wake wait-any waiters of @mutex that are eligible to acquire it
 * (either it is unowned, or the waiter already owns it and the recursion
 * count has room). Stops once the count saturates.
 */
static void try_wake_any_mutex(struct ntsync_obj *mutex)
{
	struct ntsync_q_entry *entry;

	ntsync_assert_held(mutex);
	lockdep_assert(mutex->type == NTSYNC_TYPE_MUTEX);

	list_for_each_entry(entry, &mutex->any_waiters, node) {
		struct ntsync_q *q = entry->q;
		int signaled = -1;

		if (mutex->u.mutex.count == UINT_MAX)
			break;
		/* Owned by someone else: this waiter cannot take it. */
		if (mutex->u.mutex.owner && mutex->u.mutex.owner != q->owner)
			continue;

		if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) {
			/* Propagate abandoned-mutex status to the waiter. */
			if (mutex->u.mutex.ownerdead)
				q->ownerdead = true;
			mutex->u.mutex.ownerdead = false;
			mutex->u.mutex.count++;
			mutex->u.mutex.owner = q->owner;
			wake_up_process(q->task);
		}
	}
}
366 
/*
 * Wake wait-any waiters of @event while it remains signaled. An
 * auto-reset event is consumed by the first waiter woken.
 */
static void try_wake_any_event(struct ntsync_obj *event)
{
	struct ntsync_q_entry *entry;

	ntsync_assert_held(event);
	lockdep_assert(event->type == NTSYNC_TYPE_EVENT);

	list_for_each_entry(entry, &event->any_waiters, node) {
		struct ntsync_q *q = entry->q;
		int signaled = -1;

		if (!event->u.event.signaled)
			break;

		if (atomic_try_cmpxchg(&q->signaled, &signaled, entry->index)) {
			if (!event->u.event.manual)
				event->u.event.signaled = false;
			wake_up_process(q->task);
		}
	}
}
388 
389 /*
390  * Actually change the semaphore state, returning -EOVERFLOW if it is made
391  * invalid.
392  */
393 static int release_sem_state(struct ntsync_obj *sem, __u32 count)
394 {
395 	__u32 sum;
396 
397 	ntsync_assert_held(sem);
398 
399 	if (check_add_overflow(sem->u.sem.count, count, &sum) ||
400 	    sum > sem->u.sem.max)
401 		return -EOVERFLOW;
402 
403 	sem->u.sem.count = sum;
404 	return 0;
405 }
406 
/*
 * NTSYNC_IOC_SEM_RELEASE: add the user-supplied count to the semaphore,
 * wake eligible waiters, and report the previous count back to user
 * space. Returns -EOVERFLOW if the release would exceed the maximum.
 */
static int ntsync_sem_release(struct ntsync_obj *sem, void __user *argp)
{
	struct ntsync_device *dev = sem->dev;
	__u32 __user *user_args = argp;
	__u32 prev_count;
	__u32 args;
	bool all;
	int ret;

	if (copy_from_user(&args, argp, sizeof(args)))
		return -EFAULT;

	if (sem->type != NTSYNC_TYPE_SEM)
		return -EINVAL;

	all = ntsync_lock_obj(dev, sem);

	prev_count = sem->u.sem.count;
	ret = release_sem_state(sem, args);
	if (!ret) {
		/* State changed and object still locked: wake atomically. */
		if (all)
			try_wake_all_obj(dev, sem);
		try_wake_any_sem(sem);
	}

	ntsync_unlock_obj(dev, sem, all);

	if (!ret && put_user(prev_count, user_args))
		ret = -EFAULT;

	return ret;
}
439 
440 /*
441  * Actually change the mutex state, returning -EPERM if not the owner.
442  */
443 static int unlock_mutex_state(struct ntsync_obj *mutex,
444 			      const struct ntsync_mutex_args *args)
445 {
446 	ntsync_assert_held(mutex);
447 
448 	if (mutex->u.mutex.owner != args->owner)
449 		return -EPERM;
450 
451 	if (!--mutex->u.mutex.count)
452 		mutex->u.mutex.owner = 0;
453 	return 0;
454 }
455 
/*
 * NTSYNC_IOC_MUTEX_UNLOCK: release one level of mutex recursion on
 * behalf of the given owner, wake eligible waiters if it became free,
 * and report the previous recursion count to user space.
 */
static int ntsync_mutex_unlock(struct ntsync_obj *mutex, void __user *argp)
{
	struct ntsync_mutex_args __user *user_args = argp;
	struct ntsync_device *dev = mutex->dev;
	struct ntsync_mutex_args args;
	__u32 prev_count;
	bool all;
	int ret;

	if (copy_from_user(&args, argp, sizeof(args)))
		return -EFAULT;
	/* Owner 0 means "unowned" and is never a valid caller identity. */
	if (!args.owner)
		return -EINVAL;

	if (mutex->type != NTSYNC_TYPE_MUTEX)
		return -EINVAL;

	all = ntsync_lock_obj(dev, mutex);

	prev_count = mutex->u.mutex.count;
	ret = unlock_mutex_state(mutex, &args);
	if (!ret) {
		if (all)
			try_wake_all_obj(dev, mutex);
		try_wake_any_mutex(mutex);
	}

	ntsync_unlock_obj(dev, mutex, all);

	if (!ret && put_user(prev_count, &user_args->count))
		ret = -EFAULT;

	return ret;
}
490 
491 /*
492  * Actually change the mutex state to mark its owner as dead,
493  * returning -EPERM if not the owner.
494  */
495 static int kill_mutex_state(struct ntsync_obj *mutex, __u32 owner)
496 {
497 	ntsync_assert_held(mutex);
498 
499 	if (mutex->u.mutex.owner != owner)
500 		return -EPERM;
501 
502 	mutex->u.mutex.ownerdead = true;
503 	mutex->u.mutex.owner = 0;
504 	mutex->u.mutex.count = 0;
505 	return 0;
506 }
507 
/*
 * NTSYNC_IOC_MUTEX_KILL: mark the mutex's owner as dead, releasing the
 * mutex and waking eligible waiters (who will observe -EOWNERDEAD).
 */
static int ntsync_mutex_kill(struct ntsync_obj *mutex, void __user *argp)
{
	struct ntsync_device *dev = mutex->dev;
	__u32 owner;
	bool all;
	int ret;

	if (get_user(owner, (__u32 __user *)argp))
		return -EFAULT;
	if (!owner)
		return -EINVAL;

	if (mutex->type != NTSYNC_TYPE_MUTEX)
		return -EINVAL;

	all = ntsync_lock_obj(dev, mutex);

	ret = kill_mutex_state(mutex, owner);
	if (!ret) {
		if (all)
			try_wake_all_obj(dev, mutex);
		try_wake_any_mutex(mutex);
	}

	ntsync_unlock_obj(dev, mutex, all);

	return ret;
}
536 
/*
 * NTSYNC_IOC_EVENT_SET / NTSYNC_IOC_EVENT_PULSE: signal the event, wake
 * eligible waiters, and report the previous state to user space. With
 * @pulse, the event is reset again before the lock is dropped, so only
 * waiters already queued can be woken.
 */
static int ntsync_event_set(struct ntsync_obj *event, void __user *argp, bool pulse)
{
	struct ntsync_device *dev = event->dev;
	__u32 prev_state;
	bool all;

	if (event->type != NTSYNC_TYPE_EVENT)
		return -EINVAL;

	all = ntsync_lock_obj(dev, event);

	prev_state = event->u.event.signaled;
	event->u.event.signaled = true;
	if (all)
		try_wake_all_obj(dev, event);
	try_wake_any_event(event);
	/* Pulse: signal and reset atomically with respect to waiters. */
	if (pulse)
		event->u.event.signaled = false;

	ntsync_unlock_obj(dev, event, all);

	if (put_user(prev_state, (__u32 __user *)argp))
		return -EFAULT;

	return 0;
}
563 
564 static int ntsync_event_reset(struct ntsync_obj *event, void __user *argp)
565 {
566 	struct ntsync_device *dev = event->dev;
567 	__u32 prev_state;
568 	bool all;
569 
570 	if (event->type != NTSYNC_TYPE_EVENT)
571 		return -EINVAL;
572 
573 	all = ntsync_lock_obj(dev, event);
574 
575 	prev_state = event->u.event.signaled;
576 	event->u.event.signaled = false;
577 
578 	ntsync_unlock_obj(dev, event, all);
579 
580 	if (put_user(prev_state, (__u32 __user *)argp))
581 		return -EFAULT;
582 
583 	return 0;
584 }
585 
586 static int ntsync_sem_read(struct ntsync_obj *sem, void __user *argp)
587 {
588 	struct ntsync_sem_args __user *user_args = argp;
589 	struct ntsync_device *dev = sem->dev;
590 	struct ntsync_sem_args args;
591 	bool all;
592 
593 	if (sem->type != NTSYNC_TYPE_SEM)
594 		return -EINVAL;
595 
596 	all = ntsync_lock_obj(dev, sem);
597 
598 	args.count = sem->u.sem.count;
599 	args.max = sem->u.sem.max;
600 
601 	ntsync_unlock_obj(dev, sem, all);
602 
603 	if (copy_to_user(user_args, &args, sizeof(args)))
604 		return -EFAULT;
605 	return 0;
606 }
607 
608 static int ntsync_mutex_read(struct ntsync_obj *mutex, void __user *argp)
609 {
610 	struct ntsync_mutex_args __user *user_args = argp;
611 	struct ntsync_device *dev = mutex->dev;
612 	struct ntsync_mutex_args args;
613 	bool all;
614 	int ret;
615 
616 	if (mutex->type != NTSYNC_TYPE_MUTEX)
617 		return -EINVAL;
618 
619 	all = ntsync_lock_obj(dev, mutex);
620 
621 	args.count = mutex->u.mutex.count;
622 	args.owner = mutex->u.mutex.owner;
623 	ret = mutex->u.mutex.ownerdead ? -EOWNERDEAD : 0;
624 
625 	ntsync_unlock_obj(dev, mutex, all);
626 
627 	if (copy_to_user(user_args, &args, sizeof(args)))
628 		return -EFAULT;
629 	return ret;
630 }
631 
632 static int ntsync_event_read(struct ntsync_obj *event, void __user *argp)
633 {
634 	struct ntsync_event_args __user *user_args = argp;
635 	struct ntsync_device *dev = event->dev;
636 	struct ntsync_event_args args;
637 	bool all;
638 
639 	if (event->type != NTSYNC_TYPE_EVENT)
640 		return -EINVAL;
641 
642 	all = ntsync_lock_obj(dev, event);
643 
644 	args.manual = event->u.event.manual;
645 	args.signaled = event->u.event.signaled;
646 
647 	ntsync_unlock_obj(dev, event, all);
648 
649 	if (copy_to_user(user_args, &args, sizeof(args)))
650 		return -EFAULT;
651 	return 0;
652 }
653 
/*
 * Final fput() on an object's file: drop the reference to the device
 * file taken by ntsync_alloc_obj() and free the object.
 */
static int ntsync_obj_release(struct inode *inode, struct file *file)
{
	struct ntsync_obj *obj = file->private_data;

	fput(obj->dev->file);
	kfree(obj);

	return 0;
}
663 
664 static long ntsync_obj_ioctl(struct file *file, unsigned int cmd,
665 			     unsigned long parm)
666 {
667 	struct ntsync_obj *obj = file->private_data;
668 	void __user *argp = (void __user *)parm;
669 
670 	switch (cmd) {
671 	case NTSYNC_IOC_SEM_RELEASE:
672 		return ntsync_sem_release(obj, argp);
673 	case NTSYNC_IOC_SEM_READ:
674 		return ntsync_sem_read(obj, argp);
675 	case NTSYNC_IOC_MUTEX_UNLOCK:
676 		return ntsync_mutex_unlock(obj, argp);
677 	case NTSYNC_IOC_MUTEX_KILL:
678 		return ntsync_mutex_kill(obj, argp);
679 	case NTSYNC_IOC_MUTEX_READ:
680 		return ntsync_mutex_read(obj, argp);
681 	case NTSYNC_IOC_EVENT_SET:
682 		return ntsync_event_set(obj, argp, false);
683 	case NTSYNC_IOC_EVENT_RESET:
684 		return ntsync_event_reset(obj, argp);
685 	case NTSYNC_IOC_EVENT_PULSE:
686 		return ntsync_event_set(obj, argp, true);
687 	case NTSYNC_IOC_EVENT_READ:
688 		return ntsync_event_read(obj, argp);
689 	default:
690 		return -ENOIOCTLCMD;
691 	}
692 }
693 
/* File operations for individual sem/mutex/event object files. */
static const struct file_operations ntsync_obj_fops = {
	.owner		= THIS_MODULE,
	.release	= ntsync_obj_release,
	.unlocked_ioctl	= ntsync_obj_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};
700 
701 static struct ntsync_obj *ntsync_alloc_obj(struct ntsync_device *dev,
702 					   enum ntsync_type type)
703 {
704 	struct ntsync_obj *obj;
705 
706 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
707 	if (!obj)
708 		return NULL;
709 	obj->type = type;
710 	obj->dev = dev;
711 	get_file(dev->file);
712 	spin_lock_init(&obj->lock);
713 	INIT_LIST_HEAD(&obj->any_waiters);
714 	INIT_LIST_HEAD(&obj->all_waiters);
715 	atomic_set(&obj->all_hint, 0);
716 
717 	return obj;
718 }
719 
/*
 * Create the anonymous-inode file backing @obj and install it in an fd.
 * On success the returned fd owns the file (and hence the object). On
 * failure nothing has been installed; the caller still owns @obj and
 * must clean it up.
 */
static int ntsync_obj_get_fd(struct ntsync_obj *obj)
{
	struct file *file;
	int fd;

	/* Reserve the fd first: failing after file creation would require
	 * an fput() that also tears down the object. */
	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;
	file = anon_inode_getfile("ntsync", &ntsync_obj_fops, obj, O_RDWR);
	if (IS_ERR(file)) {
		put_unused_fd(fd);
		return PTR_ERR(file);
	}
	obj->file = file;
	fd_install(fd, file);

	return fd;
}
738 
739 static int ntsync_create_sem(struct ntsync_device *dev, void __user *argp)
740 {
741 	struct ntsync_sem_args args;
742 	struct ntsync_obj *sem;
743 	int fd;
744 
745 	if (copy_from_user(&args, argp, sizeof(args)))
746 		return -EFAULT;
747 
748 	if (args.count > args.max)
749 		return -EINVAL;
750 
751 	sem = ntsync_alloc_obj(dev, NTSYNC_TYPE_SEM);
752 	if (!sem)
753 		return -ENOMEM;
754 	sem->u.sem.count = args.count;
755 	sem->u.sem.max = args.max;
756 	fd = ntsync_obj_get_fd(sem);
757 	if (fd < 0)
758 		kfree(sem);
759 
760 	return fd;
761 }
762 
763 static int ntsync_create_mutex(struct ntsync_device *dev, void __user *argp)
764 {
765 	struct ntsync_mutex_args args;
766 	struct ntsync_obj *mutex;
767 	int fd;
768 
769 	if (copy_from_user(&args, argp, sizeof(args)))
770 		return -EFAULT;
771 
772 	if (!args.owner != !args.count)
773 		return -EINVAL;
774 
775 	mutex = ntsync_alloc_obj(dev, NTSYNC_TYPE_MUTEX);
776 	if (!mutex)
777 		return -ENOMEM;
778 	mutex->u.mutex.count = args.count;
779 	mutex->u.mutex.owner = args.owner;
780 	fd = ntsync_obj_get_fd(mutex);
781 	if (fd < 0)
782 		kfree(mutex);
783 
784 	return fd;
785 }
786 
787 static int ntsync_create_event(struct ntsync_device *dev, void __user *argp)
788 {
789 	struct ntsync_event_args args;
790 	struct ntsync_obj *event;
791 	int fd;
792 
793 	if (copy_from_user(&args, argp, sizeof(args)))
794 		return -EFAULT;
795 
796 	event = ntsync_alloc_obj(dev, NTSYNC_TYPE_EVENT);
797 	if (!event)
798 		return -ENOMEM;
799 	event->u.event.manual = args.manual;
800 	event->u.event.signaled = args.signaled;
801 	fd = ntsync_obj_get_fd(event);
802 	if (fd < 0)
803 		kfree(event);
804 
805 	return fd;
806 }
807 
808 static struct ntsync_obj *get_obj(struct ntsync_device *dev, int fd)
809 {
810 	struct file *file = fget(fd);
811 	struct ntsync_obj *obj;
812 
813 	if (!file)
814 		return NULL;
815 
816 	if (file->f_op != &ntsync_obj_fops) {
817 		fput(file);
818 		return NULL;
819 	}
820 
821 	obj = file->private_data;
822 	if (obj->dev != dev) {
823 		fput(file);
824 		return NULL;
825 	}
826 
827 	return obj;
828 }
829 
/* Drop the file reference taken by get_obj(). */
static void put_obj(struct ntsync_obj *obj)
{
	fput(obj->file);
}
834 
/*
 * Sleep until the wait @q is signaled, the (absolute) timeout expires,
 * or a signal is pending. Returns 0 if woken or signaled-before-sleep,
 * a negative timer error on timeout, or -ERESTARTSYS on a signal.
 */
static int ntsync_schedule(const struct ntsync_q *q, const struct ntsync_wait_args *args)
{
	ktime_t timeout = ns_to_ktime(args->timeout);
	clockid_t clock = CLOCK_MONOTONIC;
	ktime_t *timeout_ptr;
	int ret = 0;

	/* U64_MAX means "wait forever": no timer at all. */
	timeout_ptr = (args->timeout == U64_MAX ? NULL : &timeout);

	if (args->flags & NTSYNC_WAIT_REALTIME)
		clock = CLOCK_REALTIME;

	do {
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		/*
		 * Set TASK_INTERRUPTIBLE before re-checking q->signaled so a
		 * concurrent wake_up_process() cannot be lost.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (atomic_read(&q->signaled) != -1) {
			ret = 0;
			break;
		}
		ret = schedule_hrtimeout_range_clock(timeout_ptr, 0, HRTIMER_MODE_ABS, clock);
	} while (ret < 0);
	__set_current_state(TASK_RUNNING);

	return ret;
}
864 
865 /*
866  * Allocate and initialize the ntsync_q structure, but do not queue us yet.
867  */
868 static int setup_wait(struct ntsync_device *dev,
869 		      const struct ntsync_wait_args *args, bool all,
870 		      struct ntsync_q **ret_q)
871 {
872 	int fds[NTSYNC_MAX_WAIT_COUNT + 1];
873 	const __u32 count = args->count;
874 	struct ntsync_q *q;
875 	__u32 total_count;
876 	__u32 i, j;
877 
878 	if (args->pad || (args->flags & ~NTSYNC_WAIT_REALTIME))
879 		return -EINVAL;
880 
881 	if (args->count > NTSYNC_MAX_WAIT_COUNT)
882 		return -EINVAL;
883 
884 	total_count = count;
885 	if (args->alert)
886 		total_count++;
887 
888 	if (copy_from_user(fds, u64_to_user_ptr(args->objs),
889 			   array_size(count, sizeof(*fds))))
890 		return -EFAULT;
891 	if (args->alert)
892 		fds[count] = args->alert;
893 
894 	q = kmalloc(struct_size(q, entries, total_count), GFP_KERNEL);
895 	if (!q)
896 		return -ENOMEM;
897 	q->task = current;
898 	q->owner = args->owner;
899 	atomic_set(&q->signaled, -1);
900 	q->all = all;
901 	q->ownerdead = false;
902 	q->count = count;
903 
904 	for (i = 0; i < total_count; i++) {
905 		struct ntsync_q_entry *entry = &q->entries[i];
906 		struct ntsync_obj *obj = get_obj(dev, fds[i]);
907 
908 		if (!obj)
909 			goto err;
910 
911 		if (all) {
912 			/* Check that the objects are all distinct. */
913 			for (j = 0; j < i; j++) {
914 				if (obj == q->entries[j].obj) {
915 					put_obj(obj);
916 					goto err;
917 				}
918 			}
919 		}
920 
921 		entry->obj = obj;
922 		entry->q = q;
923 		entry->index = i;
924 	}
925 
926 	*ret_q = q;
927 	return 0;
928 
929 err:
930 	for (j = 0; j < i; j++)
931 		put_obj(q->entries[j].obj);
932 	kfree(q);
933 	return -EINVAL;
934 }
935 
/* Dispatch to the type-specific wait-any wake helper. Object locked. */
static void try_wake_any_obj(struct ntsync_obj *obj)
{
	switch (obj->type) {
	case NTSYNC_TYPE_SEM:
		try_wake_any_sem(obj);
		break;
	case NTSYNC_TYPE_MUTEX:
		try_wake_any_mutex(obj);
		break;
	case NTSYNC_TYPE_EVENT:
		try_wake_any_event(obj);
		break;
	}
}
950 
/*
 * NTSYNC_IOC_WAIT_ANY: wait until any one of the given objects (or the
 * optional alert event) is signaled. On success, consumes that object's
 * state and writes its index back to user space; returns -EOWNERDEAD if
 * the consumed object was a mutex whose previous owner died.
 */
static int ntsync_wait_any(struct ntsync_device *dev, void __user *argp)
{
	struct ntsync_wait_args args;
	__u32 i, total_count;
	struct ntsync_q *q;
	int signaled;
	bool all;
	int ret;

	if (copy_from_user(&args, argp, sizeof(args)))
		return -EFAULT;

	ret = setup_wait(dev, &args, false, &q);
	if (ret < 0)
		return ret;

	total_count = args.count;
	if (args.alert)
		total_count++;

	/* queue ourselves */

	for (i = 0; i < total_count; i++) {
		struct ntsync_q_entry *entry = &q->entries[i];
		struct ntsync_obj *obj = entry->obj;

		all = ntsync_lock_obj(dev, obj);
		list_add_tail(&entry->node, &obj->any_waiters);
		ntsync_unlock_obj(dev, obj, all);
	}

	/*
	 * Check if we are already signaled.
	 *
	 * Note that the API requires that normal objects are checked before
	 * the alert event. Hence we queue the alert event last, and check
	 * objects in order.
	 */

	for (i = 0; i < total_count; i++) {
		struct ntsync_obj *obj = q->entries[i].obj;

		if (atomic_read(&q->signaled) != -1)
			break;

		all = ntsync_lock_obj(dev, obj);
		try_wake_any_obj(obj);
		ntsync_unlock_obj(dev, obj, all);
	}

	/* sleep */

	ret = ntsync_schedule(q, &args);

	/* and finally, unqueue */

	for (i = 0; i < total_count; i++) {
		struct ntsync_q_entry *entry = &q->entries[i];
		struct ntsync_obj *obj = entry->obj;

		all = ntsync_lock_obj(dev, obj);
		list_del(&entry->node);
		ntsync_unlock_obj(dev, obj, all);

		put_obj(obj);
	}

	signaled = atomic_read(&q->signaled);
	if (signaled != -1) {
		struct ntsync_wait_args __user *user_args = argp;

		/* even if we caught a signal, we need to communicate success */
		ret = q->ownerdead ? -EOWNERDEAD : 0;

		if (put_user(signaled, &user_args->index))
			ret = -EFAULT;
	} else if (!ret) {
		/* Timer expired without any object becoming signaled. */
		ret = -ETIMEDOUT;
	}

	kfree(q);
	return ret;
}
1034 
/*
 * NTSYNC_IOC_WAIT_ALL: wait until either all of the given objects are
 * simultaneously signaled (consuming all of their states atomically) or
 * the optional alert event is signaled. Writes the woken index back to
 * user space; returns -EOWNERDEAD if any consumed mutex's previous owner
 * died.
 */
static int ntsync_wait_all(struct ntsync_device *dev, void __user *argp)
{
	struct ntsync_wait_args args;
	struct ntsync_q *q;
	int signaled;
	__u32 i;
	int ret;

	if (copy_from_user(&args, argp, sizeof(args)))
		return -EFAULT;

	ret = setup_wait(dev, &args, true, &q);
	if (ret < 0)
		return ret;

	/* queue ourselves */

	mutex_lock(&dev->wait_all_lock);

	for (i = 0; i < args.count; i++) {
		struct ntsync_q_entry *entry = &q->entries[i];
		struct ntsync_obj *obj = entry->obj;

		/* Tell single-object wakers they must consider wait-all. */
		atomic_inc(&obj->all_hint);

		/*
		 * obj->all_waiters is protected by dev->wait_all_lock rather
		 * than obj->lock, so there is no need to acquire obj->lock
		 * here.
		 */
		list_add_tail(&entry->node, &obj->all_waiters);
	}
	if (args.alert) {
		struct ntsync_q_entry *entry = &q->entries[args.count];
		struct ntsync_obj *obj = entry->obj;

		/* The alert event is a wait-any style entry. */
		dev_lock_obj(dev, obj);
		list_add_tail(&entry->node, &obj->any_waiters);
		dev_unlock_obj(dev, obj);
	}

	/* check if we are already signaled */

	try_wake_all(dev, q, NULL);

	mutex_unlock(&dev->wait_all_lock);

	/*
	 * Check if the alert event is signaled, making sure to do so only
	 * after checking if the other objects are signaled.
	 */

	if (args.alert) {
		struct ntsync_obj *obj = q->entries[args.count].obj;

		if (atomic_read(&q->signaled) == -1) {
			bool all = ntsync_lock_obj(dev, obj);
			try_wake_any_obj(obj);
			ntsync_unlock_obj(dev, obj, all);
		}
	}

	/* sleep */

	ret = ntsync_schedule(q, &args);

	/* and finally, unqueue */

	mutex_lock(&dev->wait_all_lock);

	for (i = 0; i < args.count; i++) {
		struct ntsync_q_entry *entry = &q->entries[i];
		struct ntsync_obj *obj = entry->obj;

		/*
		 * obj->all_waiters is protected by dev->wait_all_lock rather
		 * than obj->lock, so there is no need to acquire it here.
		 */
		list_del(&entry->node);

		atomic_dec(&obj->all_hint);

		put_obj(obj);
	}

	mutex_unlock(&dev->wait_all_lock);

	if (args.alert) {
		struct ntsync_q_entry *entry = &q->entries[args.count];
		struct ntsync_obj *obj = entry->obj;
		bool all;

		all = ntsync_lock_obj(dev, obj);
		list_del(&entry->node);
		ntsync_unlock_obj(dev, obj, all);

		put_obj(obj);
	}

	signaled = atomic_read(&q->signaled);
	if (signaled != -1) {
		struct ntsync_wait_args __user *user_args = argp;

		/* even if we caught a signal, we need to communicate success */
		ret = q->ownerdead ? -EOWNERDEAD : 0;

		if (put_user(signaled, &user_args->index))
			ret = -EFAULT;
	} else if (!ret) {
		/* Timer expired without the wait being satisfied. */
		ret = -ETIMEDOUT;
	}

	kfree(q);
	return ret;
}
1150 
1151 static int ntsync_char_open(struct inode *inode, struct file *file)
1152 {
1153 	struct ntsync_device *dev;
1154 
1155 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1156 	if (!dev)
1157 		return -ENOMEM;
1158 
1159 	mutex_init(&dev->wait_all_lock);
1160 
1161 	file->private_data = dev;
1162 	dev->file = file;
1163 	return nonseekable_open(inode, file);
1164 }
1165 
/*
 * Final fput() on the device file. Objects hold references to this file,
 * so by the time this runs no objects remain and the device can be freed.
 */
static int ntsync_char_release(struct inode *inode, struct file *file)
{
	struct ntsync_device *dev = file->private_data;

	kfree(dev);

	return 0;
}
1174 
1175 static long ntsync_char_ioctl(struct file *file, unsigned int cmd,
1176 			      unsigned long parm)
1177 {
1178 	struct ntsync_device *dev = file->private_data;
1179 	void __user *argp = (void __user *)parm;
1180 
1181 	switch (cmd) {
1182 	case NTSYNC_IOC_CREATE_EVENT:
1183 		return ntsync_create_event(dev, argp);
1184 	case NTSYNC_IOC_CREATE_MUTEX:
1185 		return ntsync_create_mutex(dev, argp);
1186 	case NTSYNC_IOC_CREATE_SEM:
1187 		return ntsync_create_sem(dev, argp);
1188 	case NTSYNC_IOC_WAIT_ALL:
1189 		return ntsync_wait_all(dev, argp);
1190 	case NTSYNC_IOC_WAIT_ANY:
1191 		return ntsync_wait_any(dev, argp);
1192 	default:
1193 		return -ENOIOCTLCMD;
1194 	}
1195 }
1196 
/* File operations for the /dev/ntsync character device itself. */
static const struct file_operations ntsync_fops = {
	.owner		= THIS_MODULE,
	.open		= ntsync_char_open,
	.release	= ntsync_char_release,
	.unlocked_ioctl	= ntsync_char_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
};
1204 
/* Register /dev/ntsync as a misc device with a dynamic minor number. */
static struct miscdevice ntsync_misc = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= NTSYNC_NAME,
	.fops		= &ntsync_fops,
};

module_misc_device(ntsync_misc);

MODULE_AUTHOR("Elizabeth Figura <zfigura@codeweavers.com>");
MODULE_DESCRIPTION("Kernel driver for NT synchronization primitives");
MODULE_LICENSE("GPL");
1216