/*
 * Copyright 2010-2015 Samy Al Bahra.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef CK_SPINLOCK_ANDERSON_H
#define CK_SPINLOCK_ANDERSON_H

#include <ck_cc.h>
#include <ck_limits.h>
#include <ck_md.h>
#include <ck_pr.h>
#include <ck_stdbool.h>

#ifndef CK_F_SPINLOCK_ANDERSON
#define CK_F_SPINLOCK_ANDERSON
/*
 * This is an implementation of Anderson's array-based queuing lock.
 * Each waiting thread spins on its own array slot, which avoids all
 * threads contending on a single shared word.
 */
struct ck_spinlock_anderson_thread {
        unsigned int locked;   /* Flag this slot's owner spins on. */
        unsigned int position; /* Index of this slot in the slots array. */
};
typedef struct ck_spinlock_anderson_thread ck_spinlock_anderson_thread_t;

struct ck_spinlock_anderson {
        struct ck_spinlock_anderson_thread *slots; /* One slot per thread. */
        unsigned int count; /* Number of slots. */
        unsigned int wrap;  /* Counter continuation value after overflow. */
        unsigned int mask;  /* count - 1; a valid mask when count is a power of two. */
        char pad[CK_MD_CACHELINE - sizeof(unsigned int) * 3 - sizeof(void *)];
        unsigned int next;  /* Ticket counter, padded onto its own cache line. */
};
typedef struct ck_spinlock_anderson ck_spinlock_anderson_t;
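
/*
 * Example setup (an illustrative sketch; ANDERSON_THREADS is a
 * hypothetical application-defined constant, not part of this header):
 *
 *   #define ANDERSON_THREADS 8
 *
 *   static struct ck_spinlock_anderson_thread slots[ANDERSON_THREADS];
 *   static ck_spinlock_anderson_t lock;
 *
 *   // Run once, before any thread contends for the lock.
 *   ck_spinlock_anderson_init(&lock, slots, ANDERSON_THREADS);
 *
 * Each contending thread occupies one slot, so count must be at least
 * the maximum number of threads that may hold or wait on the lock.
 */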

CK_CC_INLINE static void
ck_spinlock_anderson_init(struct ck_spinlock_anderson *lock,
    struct ck_spinlock_anderson_thread *slots,
    unsigned int count)
{
        unsigned int i;

        slots[0].locked = false;
        slots[0].position = 0;
        for (i = 1; i < count; i++) {
                slots[i].locked = true;
                slots[i].position = i;
        }

        lock->slots = slots;
        lock->count = count;
        lock->mask = count - 1;
        lock->next = 0;

        /*
         * If the number of threads is not a power of two, then compute
         * the appropriate wrap-around value for the case where the
         * next-slot counter overflows.
         */
        if (count & (count - 1))
                lock->wrap = (UINT_MAX % count) + 1;
        else
                lock->wrap = 0;
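
        /*
         * Worked example of the wrap value (illustrative, assuming a
         * 32-bit unsigned int): for count == 6, UINT_MAX % 6 == 3, so
         * wrap == 4. Ticket UINT_MAX maps to slot 3, and the counter
         * resumes at 4 after overflow, so the following ticket maps to
         * slot 4 and FIFO slot order is preserved. For a power-of-two
         * count the counter wraps naturally and wrap stays 0.
         */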

        ck_pr_barrier();
        return;
}

CK_CC_INLINE static bool
ck_spinlock_anderson_locked(struct ck_spinlock_anderson *lock)
{
        unsigned int position;
        bool r;

        /*
         * The lock is busy if the slot the next ticket maps to is
         * still marked as locked.
         */
        position = ck_pr_load_uint(&lock->next) & lock->mask;
        r = ck_pr_load_uint(&lock->slots[position].locked);
        ck_pr_fence_acquire();
        return r;
}
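
/*
 * Illustrative use of the predicate above (a sketch, not prescribed by
 * this header): callers can poll it to back off before attempting an
 * acquisition, e.g.
 *
 *   while (ck_spinlock_anderson_locked(&lock) == true)
 *           ck_pr_stall();
 */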

CK_CC_INLINE static void
ck_spinlock_anderson_lock(struct ck_spinlock_anderson *lock,
    struct ck_spinlock_anderson_thread **slot)
{
        unsigned int position, next;
        unsigned int count = lock->count;

        /*
         * If count is not a power of two, then it is possible for an
         * overflow to assign the beginning slots to more than one
         * thread. To avoid this, use a compare-and-swap.
         */
        if (lock->wrap != 0) {
                position = ck_pr_load_uint(&lock->next);

                do {
                        if (position == UINT_MAX)
                                next = lock->wrap;
                        else
                                next = position + 1;
                } while (ck_pr_cas_uint_value(&lock->next, position,
                    next, &position) == false);

                position %= count;
        } else {
                position = ck_pr_faa_uint(&lock->next, 1);
                position &= lock->mask;
        }

        /* Serialize with respect to the previous thread's store. */
        ck_pr_fence_load();

        /*
         * Spin until the slot is marked as unlocked. The first slot is
         * initialized to false.
         */
        while (ck_pr_load_uint(&lock->slots[position].locked) == true)
                ck_pr_stall();

        /* Prepare the slot for potential re-use by another thread. */
        ck_pr_store_uint(&lock->slots[position].locked, true);
        ck_pr_fence_lock();

        *slot = lock->slots + position;
        return;
}

CK_CC_INLINE static void
ck_spinlock_anderson_unlock(struct ck_spinlock_anderson *lock,
    struct ck_spinlock_anderson_thread *slot)
{
        unsigned int position;

        ck_pr_fence_unlock();

        /* Mark next slot as available. */
        if (lock->wrap == 0)
                position = (slot->position + 1) & lock->mask;
        else
                position = (slot->position + 1) % lock->count;

        ck_pr_store_uint(&lock->slots[position].locked, false);
        return;
}
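
/*
 * Example acquire/release pairing (an illustrative sketch; `lock' is
 * assumed to have been initialized as shown above):
 *
 *   struct ck_spinlock_anderson_thread *slot;
 *
 *   ck_spinlock_anderson_lock(&lock, &slot);
 *   // ... critical section ...
 *   ck_spinlock_anderson_unlock(&lock, slot);
 *
 * The slot pointer produced by the lock operation must be handed back
 * to the matching unlock so the next waiter's slot can be released.
 */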
#endif /* CK_F_SPINLOCK_ANDERSON */
#endif /* CK_SPINLOCK_ANDERSON_H */