/freebsd/sys/contrib/openzfs/include/os/linux/spl/sys/mutex.h (revision 6c1e79df8c651b31ca4f656336e6c5ac29a66482)
/*
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _SPL_MUTEX_H
#define	_SPL_MUTEX_H

#include <sys/types.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/lockdep.h>
#include <linux/compiler_compat.h>

typedef enum {
	MUTEX_DEFAULT	= 0,
	MUTEX_SPIN	= 1,
	MUTEX_ADAPTIVE	= 2,
	MUTEX_NOLOCKDEP	= 3
} kmutex_type_t;

typedef struct {
	struct mutex		m_mutex;
	spinlock_t		m_lock;	/* used for serializing mutex_exit */
	kthread_t		*m_owner;
#ifdef CONFIG_LOCKDEP
	kmutex_type_t		m_type;
#endif /* CONFIG_LOCKDEP */
} kmutex_t;

#define	MUTEX(mp)		(&((mp)->m_mutex))

static inline void
spl_mutex_set_owner(kmutex_t *mp)
{
	mp->m_owner = current;
}

static inline void
spl_mutex_clear_owner(kmutex_t *mp)
{
	mp->m_owner = NULL;
}

#define	mutex_owner(mp)		(READ_ONCE((mp)->m_owner))
#define	mutex_owned(mp)		(mutex_owner(mp) == current)
#define	MUTEX_HELD(mp)		mutex_owned(mp)
#define	MUTEX_NOT_HELD(mp)	(!MUTEX_HELD(mp))
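
/*
 * Illustrative sketch (not part of this header): mutex_owned() and
 * MUTEX_HELD() are typically used to assert locking preconditions.  The
 * object "foo_t" and its fields are hypothetical; ASSERT() comes from
 * <sys/debug.h>.
 *
 *	static void
 *	foo_update_locked(foo_t *fp)
 *	{
 *		ASSERT(MUTEX_HELD(&fp->f_lock));
 *		fp->f_value++;
 *	}
 */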

#ifdef CONFIG_LOCKDEP
static inline void
spl_mutex_set_type(kmutex_t *mp, kmutex_type_t type)
{
	mp->m_type = type;
}
static inline void
spl_mutex_lockdep_off_maybe(kmutex_t *mp)
{
	if (mp && mp->m_type == MUTEX_NOLOCKDEP)
		lockdep_off();
}
static inline void
spl_mutex_lockdep_on_maybe(kmutex_t *mp)
{
	if (mp && mp->m_type == MUTEX_NOLOCKDEP)
		lockdep_on();
}
#else  /* CONFIG_LOCKDEP */
#define	spl_mutex_set_type(mp, type)
#define	spl_mutex_lockdep_off_maybe(mp)
#define	spl_mutex_lockdep_on_maybe(mp)
#endif /* CONFIG_LOCKDEP */
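
/*
 * Illustrative sketch (not part of this header): a mutex initialized with
 * MUTEX_NOLOCKDEP is skipped by the helpers above, which turn lockdep off
 * around its lock/unlock calls; this is used for locks whose ordering
 * lockdep cannot model.  The lock name below is hypothetical.
 *
 *	kmutex_t	recursive_tree_lock;
 *
 *	mutex_init(&recursive_tree_lock, NULL, MUTEX_NOLOCKDEP, NULL);
 *	mutex_enter(&recursive_tree_lock);
 *	mutex_exit(&recursive_tree_lock);
 *	mutex_destroy(&recursive_tree_lock);
 */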

/*
 * The following functions must be implemented as #defines and not as
 * static inline functions.  This ensures that the native Linux mutex
 * calls (lock/unlock) are attributed to the caller's code, which is
 * important for the kernel's built-in lock analysis tools.
 */
#undef mutex_init
#define	mutex_init(mp, name, type, ibc)				\
{								\
	static struct lock_class_key __key;			\
	ASSERT(type == MUTEX_DEFAULT || type == MUTEX_NOLOCKDEP); \
								\
	__mutex_init(MUTEX(mp), (name) ? (#name) : (#mp), &__key); \
	spin_lock_init(&(mp)->m_lock);				\
	spl_mutex_clear_owner(mp);				\
	spl_mutex_set_type(mp, type);				\
}

#undef mutex_destroy
#define	mutex_destroy(mp)					\
{								\
	VERIFY3P(mutex_owner(mp), ==, NULL);			\
}
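
/*
 * Illustrative sketch (not part of this header): typical lifecycle of a
 * kmutex_t embedded in a hypothetical object, using the Solaris-style
 * four-argument mutex_init() above (name and ibc are normally NULL).
 * Assumes kmem_zalloc()/kmem_free() from <sys/kmem.h>.
 *
 *	typedef struct foo {
 *		kmutex_t	f_lock;
 *		int		f_value;
 *	} foo_t;
 *
 *	static foo_t *
 *	foo_create(void)
 *	{
 *		foo_t *fp = kmem_zalloc(sizeof (foo_t), KM_SLEEP);
 *		mutex_init(&fp->f_lock, NULL, MUTEX_DEFAULT, NULL);
 *		return (fp);
 *	}
 *
 *	static void
 *	foo_destroy(foo_t *fp)
 *	{
 *		mutex_destroy(&fp->f_lock);
 *		kmem_free(fp, sizeof (foo_t));
 *	}
 */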

#define	mutex_tryenter(mp)					\
/* CSTYLED */							\
({								\
	int _rc_;						\
								\
	spl_mutex_lockdep_off_maybe(mp);			\
	if ((_rc_ = mutex_trylock(MUTEX(mp))) == 1)		\
		spl_mutex_set_owner(mp);			\
	spl_mutex_lockdep_on_maybe(mp);				\
								\
	_rc_;							\
})
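
/*
 * Illustrative sketch (not part of this header): mutex_tryenter() returns
 * nonzero when the lock was acquired and 0 when it is already held, so it
 * is used where the caller must not block.  The names are hypothetical.
 *
 *	if (mutex_tryenter(&fp->f_lock)) {
 *		fp->f_value++;
 *		mutex_exit(&fp->f_lock);
 *	} else {
 *		// contended: defer the work instead of sleeping
 *	}
 */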

#define	NESTED_SINGLE 1

#define	mutex_enter_nested(mp, subclass)			\
{								\
	ASSERT3P(mutex_owner(mp), !=, current);			\
	spl_mutex_lockdep_off_maybe(mp);			\
	mutex_lock_nested(MUTEX(mp), (subclass));		\
	spl_mutex_lockdep_on_maybe(mp);				\
	spl_mutex_set_owner(mp);				\
}
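
/*
 * Illustrative sketch (not part of this header): mutex_enter_nested() with
 * NESTED_SINGLE tells lockdep that holding two locks of the same class is
 * intentional, e.g. when locking two objects of the same hypothetical type
 * in a well-defined order.
 *
 *	mutex_enter(&parent->f_lock);
 *	mutex_enter_nested(&child->f_lock, NESTED_SINGLE);
 *	// operate on both objects
 *	mutex_exit(&child->f_lock);
 *	mutex_exit(&parent->f_lock);
 */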

#define	mutex_enter_interruptible(mp)				\
/* CSTYLED */							\
({								\
	int _rc_;						\
								\
	ASSERT3P(mutex_owner(mp), !=, current);			\
	spl_mutex_lockdep_off_maybe(mp);			\
	_rc_ = mutex_lock_interruptible(MUTEX(mp));		\
	spl_mutex_lockdep_on_maybe(mp);				\
	if (!_rc_) {						\
		spl_mutex_set_owner(mp);			\
	}							\
								\
	_rc_;							\
})
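
/*
 * Illustrative sketch (not part of this header): mutex_enter_interruptible()
 * returns 0 once the lock is held, or a nonzero error if the sleep was
 * interrupted by a signal, in which case the lock is NOT held.  The names
 * are hypothetical.
 *
 *	int error;
 *
 *	error = mutex_enter_interruptible(&fp->f_lock);
 *	if (error != 0)
 *		return (EINTR);		// lock was not acquired
 *	fp->f_value++;
 *	mutex_exit(&fp->f_lock);
 */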

#define	mutex_enter(mp) mutex_enter_nested((mp), 0)

/*
 * The reason for the spinlock:
 *
 * The Linux mutex is designed with a fast-path/slow-path split and does
 * not guarantee serialization on itself, allowing a race where a later
 * acquirer can finish mutex_unlock() before an earlier one has.
 *
 * That race makes the bare Linux mutex unsafe for serializing the freeing
 * of an object in which the mutex is embedded: the later acquirer could go
 * on to free the object while the earlier one is still inside
 * mutex_unlock(), causing memory corruption.
 *
 * However, there are many places in ZFS where the mutex is used to
 * serialize object freeing, and that code is shared with other OSes which
 * do not have this issue.  Thus, we need the spinlock to force
 * serialization on mutex_exit(); see the illustrative sketch after the
 * macro below.
 *
 * See http://lwn.net/Articles/575477/ for information about the race.
 */
#define	mutex_exit(mp)						\
{								\
	ASSERT3P(mutex_owner(mp), ==, current);			\
	spl_mutex_clear_owner(mp);				\
	spin_lock(&(mp)->m_lock);				\
	spl_mutex_lockdep_off_maybe(mp);			\
	mutex_unlock(MUTEX(mp));				\
	spl_mutex_lockdep_on_maybe(mp);				\
	spin_unlock(&(mp)->m_lock);				\
	/* NOTE: do not dereference mp after this point */	\
}
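
/*
 * Illustrative sketch (not part of this header) of the pattern that the
 * m_lock spinlock above makes safe: the thread that drops the last
 * reference frees an object whose embedded kmutex_t another thread may
 * still be in the middle of releasing.  The object, fields, and refcount
 * scheme are hypothetical.
 *
 *	// Thread A drops its reference:
 *	mutex_enter(&fp->f_lock);
 *	fp->f_refs--;
 *	mutex_exit(&fp->f_lock);
 *
 *	// Thread B can acquire f_lock while A is still inside the native
 *	// mutex_unlock().  Freeing fp here is safe only because mutex_exit()
 *	// serializes on m_lock, so B's own mutex_exit() cannot return before
 *	// A has fully left the mutex.
 *	mutex_enter(&fp->f_lock);
 *	if (fp->f_refs == 0) {
 *		mutex_exit(&fp->f_lock);
 *		mutex_destroy(&fp->f_lock);
 *		kmem_free(fp, sizeof (foo_t));
 *	} else {
 *		mutex_exit(&fp->f_lock);
 *	}
 */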

#endif /* _SPL_MUTEX_H */