/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/semaphore.h>
#include <linux/atomic.h>

/*
 * Reusable two-phase task barrier (rendezvous point) implementation for N tasks.
 * Based on The Little Book of Semaphores - https://greenteapress.com/wp/semaphores/
 */
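
/*
 * Typical setup (a minimal sketch; num_tasks and the surrounding init code
 * are illustrative, not part of this API).  task_barrier_add_task() is
 * called once per participating task:
 *
 *	struct task_barrier tb;
 *	int i;
 *
 *	task_barrier_init(&tb);
 *	for (i = 0; i < num_tasks; i++)
 *		task_barrier_add_task(&tb);
 *
 * Each participant then brackets its critical point with task_barrier_enter()
 * and task_barrier_exit(); see the per-task sketch after task_barrier_full()
 * below.
 */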

#ifndef DRM_TASK_BARRIER_H_
#define DRM_TASK_BARRIER_H_

/*
 * Represents an instance of a task barrier.
 */
struct task_barrier {
	unsigned int n;				/* number of participating tasks */
	atomic_t count;				/* tasks that have entered but not yet exited */
	struct semaphore enter_turnstile;	/* gates the entry rendezvous */
	struct semaphore exit_turnstile;	/* gates the exit rendezvous */
};

/* Open the turnstile for all n tasks by signaling it n times. */
static inline void task_barrier_signal_turnstile(struct semaphore *turnstile,
						 unsigned int n)
{
	int i;

	for (i = 0; i < n; i++)
		up(turnstile);
}

static inline void task_barrier_init(struct task_barrier *tb)
{
	tb->n = 0;
	atomic_set(&tb->count, 0);
	sema_init(&tb->enter_turnstile, 0);
	sema_init(&tb->exit_turnstile, 0);
}

static inline void task_barrier_add_task(struct task_barrier *tb)
{
	tb->n++;
}

static inline void task_barrier_rem_task(struct task_barrier *tb)
{
	tb->n--;
}

/*
 * Lines up all the threads BEFORE the critical point.
 *
 * Once all threads have passed this point, the entry barrier is back in its
 * locked state.
 */
static inline void task_barrier_enter(struct task_barrier *tb)
{
	/* The last task to arrive opens the entry turnstile for everyone. */
	if (atomic_inc_return(&tb->count) == tb->n)
		task_barrier_signal_turnstile(&tb->enter_turnstile, tb->n);

	down(&tb->enter_turnstile);
}

/*
 * Lines up all the threads AFTER the critical point.
 *
 * This prevents any one thread from running ahead of the others when the
 * barrier is used repeatedly.
 */
static inline void task_barrier_exit(struct task_barrier *tb)
{
	/* The last task to leave opens the exit turnstile for everyone. */
	if (atomic_dec_return(&tb->count) == 0)
		task_barrier_signal_turnstile(&tb->exit_turnstile, tb->n);

	down(&tb->exit_turnstile);
}

/* Convenience function for when there is nothing to be done between entry and exit. */
static inline void task_barrier_full(struct task_barrier *tb)
{
	task_barrier_enter(tb);
	task_barrier_exit(tb);
}
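
/*
 * Per-task usage (a minimal sketch; do_synchronized_work() is a hypothetical
 * placeholder for whatever each task does at the rendezvous).  Phase 1
 * (task_barrier_enter) waits for all n tasks to arrive; phase 2
 * (task_barrier_exit) waits for all n tasks to finish:
 *
 *	task_barrier_enter(&tb);
 *	do_synchronized_work();
 *	task_barrier_exit(&tb);
 *
 * Call task_barrier_full() instead when there is nothing to do between the
 * two phases.
 */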

#endif