xref: /linux/include/linux/spinlock_rt.h (revision 7f71507851fc7764b36a3221839607d3a45c2025)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #ifndef __LINUX_SPINLOCK_RT_H
3 #define __LINUX_SPINLOCK_RT_H
4 
5 #ifndef __LINUX_INSIDE_SPINLOCK_H
6 #error Do not include directly. Use spinlock.h
7 #endif
8 
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Register the lock's class (@name/@key) with lockdep. @percpu presumably
 * selects per-CPU lock-class handling for local_lock based users — the
 * implementation is not visible here; confirm in the .c file.
 */
extern void __rt_spin_lock_init(spinlock_t *lock, const char *name,
				struct lock_class_key *key, bool percpu);
#else
/* Without lockdep there is nothing to register: init is a no-op. */
static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
				struct lock_class_key *key, bool percpu)
{
}
#endif
18 
/*
 * Initialize the underlying rt_mutex, then register the lock class with
 * lockdep (a no-op when CONFIG_DEBUG_LOCK_ALLOC=n, see above).
 */
#define __spin_lock_init(slock, name, key, percpu)		\
do {								\
	rt_mutex_base_init(&(slock)->lock);			\
	__rt_spin_lock_init(slock, name, key, percpu);		\
} while (0)

/*
 * The static __key gives every spin_lock_init() invocation site its own
 * lock class; #slock stringifies the argument as the class name used in
 * lockdep reports.
 */
#define _spin_lock_init(slock, percpu)				\
do {								\
	static struct lock_class_key __key;			\
	__spin_lock_init(slock, #slock, &__key, percpu);	\
} while (0)

#define spin_lock_init(slock)		_spin_lock_init(slock, false)
#define local_spin_lock_init(slock)	_spin_lock_init(slock, true)
33 
/*
 * Out-of-line RT lock primitives backing the spin_*() API below.
 * __acquires()/__releases() are sparse lock-context annotations only;
 * they generate no code.
 */
extern void rt_spin_lock(spinlock_t *lock) __acquires(lock);
extern void rt_spin_lock_nested(spinlock_t *lock, int subclass)	__acquires(lock);
extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock) __acquires(lock);
extern void rt_spin_unlock(spinlock_t *lock)	__releases(lock);
extern void rt_spin_lock_unlock(spinlock_t *lock);
extern int rt_spin_trylock_bh(spinlock_t *lock);
extern int rt_spin_trylock(spinlock_t *lock);
41 
/*
 * On RT a spinlock_t wraps an rt_mutex (see spin_lock_init() above), so
 * acquiring it maps directly onto the rt_mutex-based primitive.
 */
static __always_inline void spin_lock(spinlock_t *lock)
{
	rt_spin_lock(lock);
}
46 
#ifdef CONFIG_LOCKDEP
# define __spin_lock_nested(lock, subclass)				\
	rt_spin_lock_nested(lock, subclass)

/*
 * typecheck() rejects nest_lock arguments that do not embed a lockdep
 * map; the dep_map of the outer lock validates the nesting.
 */
# define __spin_lock_nest_lock(lock, nest_lock)				\
	do {								\
		typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
		rt_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	} while (0)
/*
 * No interrupt state is saved here (the body never disables interrupts);
 * flags is zeroed only to keep the !RT API shape.
 */
# define __spin_lock_irqsave_nested(lock, flags, subclass)	\
	do {							\
		typecheck(unsigned long, flags);		\
		flags = 0;					\
		__spin_lock_nested(lock, subclass);		\
	} while (0)

#else
 /*
  * Always evaluate the 'subclass' argument to avoid that the compiler
  * warns about set-but-not-used variables when building with
  * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
  */
# define __spin_lock_nested(lock, subclass)	spin_lock(((void)(subclass), (lock)))
# define __spin_lock_nest_lock(lock, subclass)	spin_lock(((void)(subclass), (lock)))
# define __spin_lock_irqsave_nested(lock, flags, subclass)	\
	spin_lock_irqsave(((void)(subclass), (lock)), flags)
#endif
74 
/*
 * Public nesting API; the __-prefixed implementations above differ
 * depending on CONFIG_LOCKDEP.
 */
#define spin_lock_nested(lock, subclass)		\
	__spin_lock_nested(lock, subclass)

#define spin_lock_nest_lock(lock, nest_lock)		\
	__spin_lock_nest_lock(lock, nest_lock)

#define spin_lock_irqsave_nested(lock, flags, subclass)	\
	__spin_lock_irqsave_nested(lock, flags, subclass)
83 
/*
 * Disable bottom halves, then acquire the lock. Note the acquisition
 * can block on RT while BHs stay disabled — hence the open question
 * below, kept from the original author.
 */
static __always_inline void spin_lock_bh(spinlock_t *lock)
{
	/* Investigate: Drop bh when blocking ? */
	local_bh_disable();
	rt_spin_lock(lock);
}
90 
/*
 * Interrupt disabling is deliberately omitted: the body is identical to
 * spin_lock(). On RT the lock is rt_mutex based rather than a true
 * spinning lock, so the _irq variant degenerates to a plain acquire.
 */
static __always_inline void spin_lock_irq(spinlock_t *lock)
{
	rt_spin_lock(lock);
}
95 
/*
 * Nothing is saved: interrupts are not disabled (see spin_lock_irq()).
 * flags is zeroed so the value handed to spin_unlock_irqrestore() —
 * which ignores it — is at least initialized; typecheck() keeps callers
 * honest about its type.
 */
#define spin_lock_irqsave(lock, flags)			 \
	do {						 \
		typecheck(unsigned long, flags);	 \
		flags = 0;				 \
		spin_lock(lock);			 \
	} while (0)
102 
/* Release the rt_mutex-backed lock. */
static __always_inline void spin_unlock(spinlock_t *lock)
{
	rt_spin_unlock(lock);
}
107 
/* Release the lock, then re-enable bottom halves (pairs with spin_lock_bh()). */
static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
	rt_spin_unlock(lock);
	local_bh_enable();
}
113 
/*
 * Identical to spin_unlock(): spin_lock_irq() did not disable
 * interrupts, so there is nothing to re-enable.
 */
static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
	rt_spin_unlock(lock);
}
118 
/*
 * @flags is accepted for API compatibility but ignored: nothing was
 * saved by spin_lock_irqsave() on RT (it stores 0 unconditionally).
 */
static __always_inline void spin_unlock_irqrestore(spinlock_t *lock,
						   unsigned long flags)
{
	rt_spin_unlock(lock);
}
124 
/*
 * __cond_lock() tells sparse the lock is held only when the trylock
 * returned non-zero; at runtime it is just the rt_spin_trylock*() call.
 */
#define spin_trylock(lock)				\
	__cond_lock(lock, rt_spin_trylock(lock))

#define spin_trylock_bh(lock)				\
	__cond_lock(lock, rt_spin_trylock_bh(lock))

/* No interrupt disabling on RT: the _irq variant is a plain trylock. */
#define spin_trylock_irq(lock)				\
	__cond_lock(lock, rt_spin_trylock(lock))
133 
/*
 * Statement-expression variant yielding the trylock result. As with
 * spin_lock_irqsave(), flags is only type-checked and zeroed; no
 * interrupt state exists to save on RT.
 */
#define spin_trylock_irqsave(lock, flags)		\
({							\
	int __locked;					\
							\
	typecheck(unsigned long, flags);		\
	flags = 0;					\
	__locked = spin_trylock(lock);			\
	__locked;					\
})
143 
/*
 * RT spinlocks expose no contention information; evaluate @lock for
 * side effects / type checking and report "not contended".
 */
#define spin_is_contended(lock)		(((void)(lock), 0))

/* True if the underlying rt_mutex currently has an owner. */
static inline int spin_is_locked(spinlock_t *lock)
{
	return rt_mutex_base_is_locked(&lock->lock);
}

/*
 * Note: this only checks that *someone* holds the lock, not that the
 * caller does — same limitation as the !RT implementation.
 */
#define assert_spin_locked(lock) BUG_ON(!spin_is_locked(lock))
152 
153 #include <linux/rwlock_rt.h>
154 
155 #endif
156