1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
4 * Copyright (C) 2007 The Regents of the University of California.
5 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
6 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
7 * UCRL-CODE-235197
8 *
9 * This file is part of the SPL, Solaris Porting Layer.
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 */
24
25 #ifndef _SPL_MUTEX_H
26 #define _SPL_MUTEX_H
27
28 #include <sys/types.h>
29 #include <linux/sched.h>
30 #include <linux/mutex.h>
31 #include <linux/lockdep.h>
32 #include <linux/compiler_compat.h>
33
/*
 * Solaris mutex types.  mutex_init() below only accepts MUTEX_DEFAULT
 * and MUTEX_NOLOCKDEP; the other values exist for source compatibility.
 */
typedef enum {
	MUTEX_DEFAULT = 0,	/* Standard sleeping mutex */
	MUTEX_SPIN = 1,		/* Solaris compat; not accepted by mutex_init() */
	MUTEX_ADAPTIVE = 2,	/* Solaris compat; not accepted by mutex_init() */
	MUTEX_NOLOCKDEP = 3	/* Like MUTEX_DEFAULT, but lockdep is disabled */
} kmutex_type_t;
40
typedef struct {
	struct mutex m_mutex;	/* underlying Linux mutex */
	spinlock_t m_lock;	/* used for serializing mutex_exit */
	kthread_t *m_owner;	/* current holder, NULL when unlocked; read
				 * locklessly via mutex_owner() */
#ifdef CONFIG_LOCKDEP
	kmutex_type_t m_type;	/* recorded so NOLOCKDEP mutexes can skip lockdep */
#endif /* CONFIG_LOCKDEP */
} kmutex_t;
49
/* Access the native struct mutex embedded in a kmutex_t. */
#define	MUTEX(mp)		(&((mp)->m_mutex))
51
52 static inline void
spl_mutex_set_owner(kmutex_t * mp)53 spl_mutex_set_owner(kmutex_t *mp)
54 {
55 mp->m_owner = current;
56 }
57
58 static inline void
spl_mutex_clear_owner(kmutex_t * mp)59 spl_mutex_clear_owner(kmutex_t *mp)
60 {
61 mp->m_owner = NULL;
62 }
63
/* Lockless snapshot of the holder; may be stale unless the caller holds mp. */
#define	mutex_owner(mp)		(READ_ONCE((mp)->m_owner))
/* True iff the calling thread currently holds the mutex. */
#define	mutex_owned(mp)		(mutex_owner(mp) == current)
#define	MUTEX_HELD(mp)		mutex_owned(mp)
#define	MUTEX_NOT_HELD(mp)	(!MUTEX_HELD(mp))
68
#ifdef CONFIG_LOCKDEP
/* Remember the mutex type so NOLOCKDEP mutexes can bypass lockdep. */
static inline void
spl_mutex_set_type(kmutex_t *mp, kmutex_type_t type)
{
	mp->m_type = type;
}
/* Disable lockdep tracking around lock/unlock for MUTEX_NOLOCKDEP mutexes. */
static inline void
spl_mutex_lockdep_off_maybe(kmutex_t *mp)
{
	if (mp && mp->m_type == MUTEX_NOLOCKDEP)
		lockdep_off();
}
/* Re-enable lockdep tracking disabled by spl_mutex_lockdep_off_maybe(). */
static inline void
spl_mutex_lockdep_on_maybe(kmutex_t *mp)
{
	if (mp && mp->m_type == MUTEX_NOLOCKDEP)
		lockdep_on();
}
#else /* CONFIG_LOCKDEP */
#define	spl_mutex_set_type(mp, type)
#define	spl_mutex_lockdep_off_maybe(mp)
#define	spl_mutex_lockdep_on_maybe(mp)
#endif /* CONFIG_LOCKDEP */
92
93 /*
94 * The following functions must be a #define and not static inline.
95 * This ensures that the native linux mutex functions (lock/unlock)
96 * will be correctly located in the users code which is important
97 * for the built in kernel lock analysis tools
98 */
#undef mutex_init
/*
 * Initialize a kmutex_t.  'name' and 'ibc' exist for Solaris source
 * compatibility; 'ibc' is ignored.  Only MUTEX_DEFAULT and
 * MUTEX_NOLOCKDEP are supported.  The per-expansion static __key gives
 * each init site its own lockdep class.  Wrapped in do { } while (0)
 * so the macro is a single statement (safe in unbraced if/else).
 */
#define	mutex_init(mp, name, type, ibc)				\
do {								\
	static struct lock_class_key __key;			\
	ASSERT((type) == MUTEX_DEFAULT || (type) == MUTEX_NOLOCKDEP); \
								\
	__mutex_init(MUTEX(mp), (name) ? (#name) : (#mp), &__key); \
	spin_lock_init(&(mp)->m_lock);				\
	spl_mutex_clear_owner(mp);				\
	spl_mutex_set_type(mp, type);				\
} while (0)
110
#undef mutex_destroy
/*
 * Destroy a kmutex_t.  The underlying Linux mutex needs no teardown;
 * just verify the mutex is not held.  do { } while (0) makes the
 * macro a single statement (safe in unbraced if/else).
 */
#define	mutex_destroy(mp)					\
do {								\
	VERIFY3P(mutex_owner(mp), ==, NULL);			\
} while (0)
116
/*
 * Attempt to take the mutex without blocking.  Evaluates to 1 on
 * success (owner recorded) or 0 if the mutex is already held.
 */
#define	mutex_tryenter(mp)					\
/* CSTYLED */							\
({								\
	int _acquired_;						\
								\
	spl_mutex_lockdep_off_maybe(mp);			\
	_acquired_ = mutex_trylock(MUTEX(mp));			\
	if (_acquired_)						\
		spl_mutex_set_owner(mp);			\
	spl_mutex_lockdep_on_maybe(mp);				\
								\
	_acquired_;						\
})
129
/* Lockdep subclass for one level of nesting with mutex_enter_nested(). */
#define	NESTED_SINGLE 1
131
/*
 * Acquire the mutex with an explicit lockdep subclass, permitting
 * nested acquisition of mutexes in the same lockdep class.  Asserts
 * against recursive entry by the current thread.  do { } while (0)
 * makes the macro a single statement (safe in unbraced if/else).
 */
#define	mutex_enter_nested(mp, subclass)			\
do {								\
	ASSERT3P(mutex_owner(mp), !=, current);			\
	spl_mutex_lockdep_off_maybe(mp);			\
	mutex_lock_nested(MUTEX(mp), (subclass));		\
	spl_mutex_lockdep_on_maybe(mp);				\
	spl_mutex_set_owner(mp);				\
} while (0)
140
/*
 * Acquire the mutex, sleeping interruptibly.  Evaluates to 0 on
 * success (owner recorded) or nonzero if the sleep was interrupted,
 * in which case the mutex is not held.
 */
#define	mutex_enter_interruptible(mp)				\
/* CSTYLED */							\
({								\
	int _err_;						\
								\
	ASSERT3P(mutex_owner(mp), !=, current);			\
	spl_mutex_lockdep_off_maybe(mp);			\
	_err_ = mutex_lock_interruptible(MUTEX(mp));		\
	spl_mutex_lockdep_on_maybe(mp);				\
	if (_err_ == 0)						\
		spl_mutex_set_owner(mp);			\
								\
	_err_;							\
})
156
/* Standard uninterruptible acquisition (lockdep subclass 0). */
#define	mutex_enter(mp) mutex_enter_nested((mp), 0)
158
159 /*
160 * The reason for the spinlock:
161 *
162 * The Linux mutex is designed with a fast-path/slow-path design such that it
163 * does not guarantee serialization upon itself, allowing a race where latter
164 * acquirers finish mutex_unlock before former ones.
165 *
166 * The race renders it unsafe to be used for serializing the freeing of an
167 * object in which the mutex is embedded, where the latter acquirer could go
168 * on to free the object while the former one is still doing mutex_unlock and
169 * causing memory corruption.
170 *
171 * However, there are many places in ZFS where the mutex is used for
172 * serializing object freeing, and the code is shared among other OSes without
173 * this issue. Thus, we need the spinlock to force the serialization on
174 * mutex_exit().
175 *
176 * See http://lwn.net/Articles/575477/ for the information about the race.
177 */
/*
 * Release the mutex.  The owner is cleared before unlocking, and
 * m_lock serializes mutex_unlock() so the containing object can be
 * safely freed by the next acquirer (see the race description above).
 * do { } while (0) makes the macro a single statement.
 */
#define	mutex_exit(mp)						\
do {								\
	ASSERT3P(mutex_owner(mp), ==, current);			\
	spl_mutex_clear_owner(mp);				\
	spin_lock(&(mp)->m_lock);				\
	spl_mutex_lockdep_off_maybe(mp);			\
	mutex_unlock(MUTEX(mp));				\
	spl_mutex_lockdep_on_maybe(mp);				\
	spin_unlock(&(mp)->m_lock);				\
	/* NOTE: do not dereference mp after this point */	\
} while (0)
189
190 #endif /* _SPL_MUTEX_H */
191