#ifndef __LINUX_SPINLOCK_API_UP_H
#define __LINUX_SPINLOCK_API_UP_H

#ifndef __LINUX_INSIDE_SPINLOCK_H
# error "please don't include this file directly"
#endif

/*
 * include/linux/spinlock_api_up.h
 *
 * spinlock API implementation on UP-nondebug (inlined implementation)
 *
 * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
 * Released under the General Public License (GPL).
 */

#define in_lock_functions(ADDR)		0

#define assert_raw_spin_locked(lock)	do { (void)(lock); } while (0)

/*
 * In the UP-nondebug case there's no real locking going on, so the
 * only thing we have to do is to keep the preempt counts and irq
 * flags straight, to suppress compiler warnings of unused lock
 * variables, and to add the proper checker annotations:
 */
#define ___LOCK_(lock) \
  do { __acquire(lock); (void)(lock); } while (0)

#define ___LOCK_shared(lock) \
  do { __acquire_shared(lock); (void)(lock); } while (0)

#define __LOCK(lock, ...) \
  do { preempt_disable(); ___LOCK_##__VA_ARGS__(lock); } while (0)

#define __LOCK_BH(lock, ...) \
  do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK_##__VA_ARGS__(lock); } while (0)

#define __LOCK_IRQ(lock, ...) \
  do { local_irq_disable(); __LOCK(lock, ##__VA_ARGS__); } while (0)

#define __LOCK_IRQSAVE(lock, flags, ...) \
  do { local_irq_save(flags); __LOCK(lock, ##__VA_ARGS__); } while (0)

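/*
 * Illustrative expansion (an informational sketch derived from the
 * helpers above): the UP-nondebug _raw_spin_lock_irqsave(lock, flags)
 * defined below reduces, step by step, to:
 *
 *	local_irq_save(flags);
 *	preempt_disable();
 *	__acquire(lock);	(checker annotation only)
 *	(void)(lock);		(silences "unused variable" warnings)
 *
 * No atomic operation ever touches the lock word itself.
 */
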
#define ___UNLOCK_(lock) \
  do { __release(lock); (void)(lock); } while (0)

#define ___UNLOCK_shared(lock) \
  do { __release_shared(lock); (void)(lock); } while (0)

#define __UNLOCK(lock, ...) \
  do { preempt_enable(); ___UNLOCK_##__VA_ARGS__(lock); } while (0)

#define __UNLOCK_BH(lock, ...) \
  do { __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); \
       ___UNLOCK_##__VA_ARGS__(lock); } while (0)

#define __UNLOCK_IRQ(lock, ...) \
  do { local_irq_enable(); __UNLOCK(lock, ##__VA_ARGS__); } while (0)

#define __UNLOCK_IRQRESTORE(lock, flags, ...) \
  do { local_irq_restore(flags); __UNLOCK(lock, ##__VA_ARGS__); } while (0)

#define _raw_spin_lock(lock)			__LOCK(lock)
#define _raw_spin_lock_nested(lock, subclass)	__LOCK(lock)
#define _raw_read_lock(lock)			__LOCK(lock, shared)
#define _raw_write_lock(lock)			__LOCK(lock)
#define _raw_write_lock_nested(lock, subclass)	__LOCK(lock)
#define _raw_spin_lock_bh(lock)			__LOCK_BH(lock)
#define _raw_read_lock_bh(lock)			__LOCK_BH(lock, shared)
#define _raw_write_lock_bh(lock)		__LOCK_BH(lock)
#define _raw_spin_lock_irq(lock)		__LOCK_IRQ(lock)
#define _raw_read_lock_irq(lock)		__LOCK_IRQ(lock, shared)
#define _raw_write_lock_irq(lock)		__LOCK_IRQ(lock)
#define _raw_spin_lock_irqsave(lock, flags)	__LOCK_IRQSAVE(lock, flags)
#define _raw_read_lock_irqsave(lock, flags)	__LOCK_IRQSAVE(lock, flags, shared)
#define _raw_write_lock_irqsave(lock, flags)	__LOCK_IRQSAVE(lock, flags)

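/*
 * How the trailing "shared" argument works (illustrative, derived from
 * the token pasting in __LOCK() above): _raw_read_lock(lock) expands
 * via __LOCK(lock, shared) to
 *
 *	preempt_disable();
 *	__acquire_shared(lock);
 *	(void)(lock);
 *
 * while the spin/write variants paste an empty suffix and so pick the
 * exclusive ___LOCK_() helper instead.
 */
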
static __always_inline int _raw_spin_trylock(raw_spinlock_t *lock)
	__cond_acquires(true, lock)
{
	__LOCK(lock);
	return 1;
}

static __always_inline int _raw_spin_trylock_bh(raw_spinlock_t *lock)
	__cond_acquires(true, lock)
{
	__LOCK_BH(lock);
	return 1;
}

static __always_inline int _raw_spin_trylock_irq(raw_spinlock_t *lock)
	__cond_acquires(true, lock)
{
	__LOCK_IRQ(lock);
	return 1;
}

static __always_inline int _raw_spin_trylock_irqsave(raw_spinlock_t *lock, unsigned long *flags)
	__cond_acquires(true, lock)
{
	__LOCK_IRQSAVE(lock, *(flags));
	return 1;
}

static __always_inline int _raw_read_trylock(rwlock_t *lock)
	__cond_acquires_shared(true, lock)
{
	__LOCK(lock, shared);
	return 1;
}

static __always_inline int _raw_write_trylock(rwlock_t *lock)
	__cond_acquires(true, lock)
{
	__LOCK(lock);
	return 1;
}

static __always_inline int _raw_write_trylock_irqsave(rwlock_t *lock, unsigned long *flags)
	__cond_acquires(true, lock)
{
	__LOCK_IRQSAVE(lock, *(flags));
	return 1;
}

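/*
 * Note that every trylock above unconditionally returns 1: on UP there
 * is no other CPU that could hold the lock, so "trying" always succeeds
 * once preemption (and possibly irqs or bhs) has been disabled. A
 * caller sketch (my_lock is a hypothetical variable, for illustration
 * only):
 *
 *	if (_raw_spin_trylock(&my_lock)) {
 *		... critical section ...
 *		_raw_spin_unlock(&my_lock);
 *	}
 *
 * always takes the success path when this header is in use.
 */
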
#define _raw_spin_unlock(lock)			__UNLOCK(lock)
#define _raw_read_unlock(lock)			__UNLOCK(lock, shared)
#define _raw_write_unlock(lock)			__UNLOCK(lock)
#define _raw_spin_unlock_bh(lock)		__UNLOCK_BH(lock)
#define _raw_write_unlock_bh(lock)		__UNLOCK_BH(lock)
#define _raw_read_unlock_bh(lock)		__UNLOCK_BH(lock, shared)
#define _raw_spin_unlock_irq(lock)		__UNLOCK_IRQ(lock)
#define _raw_read_unlock_irq(lock)		__UNLOCK_IRQ(lock, shared)
#define _raw_write_unlock_irq(lock)		__UNLOCK_IRQ(lock)
#define _raw_spin_unlock_irqrestore(lock, flags) \
					__UNLOCK_IRQRESTORE(lock, flags)
#define _raw_read_unlock_irqrestore(lock, flags) \
					__UNLOCK_IRQRESTORE(lock, flags, shared)
#define _raw_write_unlock_irqrestore(lock, flags) \
					__UNLOCK_IRQRESTORE(lock, flags)

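/*
 * Matching unlock expansion (illustrative): _raw_spin_unlock_irqrestore()
 * mirrors the irqsave example above, reducing to
 *
 *	local_irq_restore(flags);
 *	preempt_enable();
 *	__release(lock);
 *	(void)(lock);
 *
 * so lock/unlock pairs stay balanced in both the preempt count and the
 * saved irq flags.
 */
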
#endif /* __LINUX_SPINLOCK_API_UP_H */