xref: /linux/include/linux/spinlock_rt.h (revision 0e2036a06dcf61dbd100168830287d6c42cd61e1)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #ifndef __LINUX_SPINLOCK_RT_H
3 #define __LINUX_SPINLOCK_RT_H
4 
5 #ifndef __LINUX_INSIDE_SPINLOCK_H
6 #error Do not include directly. Use spinlock.h
7 #endif
8 
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Register @lock with the lock debugging/lockdep machinery. @name and @key
 * identify the lock class; @percpu selects per-CPU lock handling.
 */
extern void __rt_spin_lock_init(spinlock_t *lock, const char *name,
				struct lock_class_key *key, bool percpu);
#else
/* Without CONFIG_DEBUG_LOCK_ALLOC there is nothing to register: no-op. */
static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
				struct lock_class_key *key, bool percpu)
{
}
#endif
18 
/*
 * Core initializer for an RT spinlock:
 *  1. initialize the underlying rt_mutex base,
 *  2. register the lock with lockdep (no-op if !CONFIG_DEBUG_LOCK_ALLOC),
 *  3. tell context-tracking analysis that @slock's lock context exists.
 */
#define __spin_lock_init(slock, name, key, percpu)		\
do {								\
	rt_mutex_base_init(&(slock)->lock);			\
	__rt_spin_lock_init(slock, name, key, percpu);		\
	__assume_ctx_lock(slock);				\
} while (0)
25 
/*
 * Each invocation site gets its own static lock_class_key, so every
 * spin_lock_init() call defines a distinct lockdep class. The stringified
 * lock expression (#slock) serves as the class name.
 */
#define _spin_lock_init(slock, percpu)				\
do {								\
	static struct lock_class_key __key;			\
	__spin_lock_init(slock, #slock, &__key, percpu);	\
} while (0)
31 
/* Public initializers: regular vs. per-CPU ("local") spinlocks. */
#define spin_lock_init(slock)		_spin_lock_init(slock, false)
#define local_spin_lock_init(slock)	_spin_lock_init(slock, true)
34 
/*
 * Out-of-line RT spinlock primitives (implemented in kernel/locking/).
 * __acquires/__releases/__cond_acquires are sparse context annotations;
 * the trylock variants return non-zero on success.
 */
extern void rt_spin_lock(spinlock_t *lock) __acquires(lock);
extern void rt_spin_lock_nested(spinlock_t *lock, int subclass)	__acquires(lock);
extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock) __acquires(lock);
extern void rt_spin_unlock(spinlock_t *lock)	__releases(lock);
extern void rt_spin_lock_unlock(spinlock_t *lock);
extern int rt_spin_trylock_bh(spinlock_t *lock) __cond_acquires(true, lock);
extern int rt_spin_trylock(spinlock_t *lock) __cond_acquires(true, lock);
42 
/*
 * On PREEMPT_RT, spin_lock() maps directly onto the rt_mutex based
 * rt_spin_lock() — the lock may therefore block, unlike !RT spinlocks.
 */
static __always_inline void spin_lock(spinlock_t *lock)
	__acquires(lock)
{
	rt_spin_lock(lock);
}
48 
#ifdef CONFIG_LOCKDEP
/* Lockdep-aware nesting variants: pass subclass/nest_lock through. */
# define __spin_lock_nested(lock, subclass)				\
	rt_spin_lock_nested(lock, subclass)

# define __spin_lock_nest_lock(lock, nest_lock)				\
	do {								\
		typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
		rt_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	} while (0)
/* IRQs are not disabled on RT; @flags is only typechecked and zeroed. */
# define __spin_lock_irqsave_nested(lock, flags, subclass)	\
	do {							\
		typecheck(unsigned long, flags);		\
		flags = 0;					\
		__spin_lock_nested(lock, subclass);		\
	} while (0)

#else
 /*
  * Always evaluate the 'subclass' argument to avoid that the compiler
  * warns about set-but-not-used variables when building with
  * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
  */
# define __spin_lock_nested(lock, subclass)	spin_lock(((void)(subclass), (lock)))
# define __spin_lock_nest_lock(lock, subclass)	spin_lock(((void)(subclass), (lock)))
# define __spin_lock_irqsave_nested(lock, flags, subclass)	\
	spin_lock_irqsave(((void)(subclass), (lock)), flags)
#endif
76 
/* Public nesting API; resolves per CONFIG_LOCKDEP above. */
#define spin_lock_nested(lock, subclass)		\
	__spin_lock_nested(lock, subclass)

#define spin_lock_nest_lock(lock, nest_lock)		\
	__spin_lock_nest_lock(lock, nest_lock)

#define spin_lock_irqsave_nested(lock, flags, subclass)	\
	__spin_lock_irqsave_nested(lock, flags, subclass)
85 
/*
 * BH-disabling lock: softirqs are disabled before taking the lock so the
 * BH-off section covers the whole critical section.
 */
static __always_inline void spin_lock_bh(spinlock_t *lock)
	__acquires(lock)
{
	/* Investigate: Drop bh when blocking ? */
	local_bh_disable();
	rt_spin_lock(lock);
}
93 
/*
 * On RT, spin_lock_irq() does NOT disable hardware interrupts — it is a
 * plain rt_spin_lock(). The _irq name is kept for API compatibility.
 */
static __always_inline void spin_lock_irq(spinlock_t *lock)
	__acquires(lock)
{
	rt_spin_lock(lock);
}
99 
/*
 * No IRQ state is saved on RT: @flags is typechecked and set to 0 so
 * callers that pass it to spin_unlock_irqrestore() keep working.
 */
#define spin_lock_irqsave(lock, flags)			 \
	do {						 \
		typecheck(unsigned long, flags);	 \
		flags = 0;				 \
		spin_lock(lock);			 \
	} while (0)
106 
/* Release the rt_mutex backing @lock. */
static __always_inline void spin_unlock(spinlock_t *lock)
	__releases(lock)
{
	rt_spin_unlock(lock);
}
112 
/* Release the lock first, then re-enable softirqs (reverse of spin_lock_bh). */
static __always_inline void spin_unlock_bh(spinlock_t *lock)
	__releases(lock)
{
	rt_spin_unlock(lock);
	local_bh_enable();
}
119 
/* No IRQ state to restore on RT; identical to spin_unlock(). */
static __always_inline void spin_unlock_irq(spinlock_t *lock)
	__releases(lock)
{
	rt_spin_unlock(lock);
}
125 
/*
 * @flags is deliberately ignored: spin_lock_irqsave() stored no real IRQ
 * state on RT (it always sets flags to 0).
 */
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
						   unsigned long flags)
	__releases(lock)
{
	rt_spin_unlock(lock);
}
132 
/* Trylock variants; _irq maps to the plain trylock since IRQs stay enabled. */
#define spin_trylock(lock)	rt_spin_trylock(lock)

#define spin_trylock_bh(lock)	rt_spin_trylock_bh(lock)

#define spin_trylock_irq(lock)	rt_spin_trylock(lock)
138 
139 static __always_inline bool _spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
140 	__cond_acquires(true, lock)
141 {
142 	*flags = 0;
143 	return rt_spin_trylock(lock);
144 }
145 #define spin_trylock_irqsave(lock, flags) _spin_trylock_irqsave(lock, &(flags))
146 
147 #define spin_is_contended(lock)		(((void)(lock), 0))
148 
149 static inline int spin_is_locked(spinlock_t *lock)
150 {
151 	return rt_mutex_base_is_locked(&lock->lock);
152 }
153 
154 #define assert_spin_locked(lock) BUG_ON(!spin_is_locked(lock))
155 
156 #include <linux/rwlock_rt.h>
157 
158 #endif
159