xref: /freebsd/sys/compat/linuxkpi/common/src/linux_lock.c (revision 911f0260390e18cf85f3dbf2c719b593efdc1e3c)
1 /*-
2  * Copyright (c) 2017 Mellanox Technologies, Ltd.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  *
26  * $FreeBSD$
27  */
28 
29 #include <sys/queue.h>
30 
31 #include <linux/sched.h>
32 #include <linux/ww_mutex.h>
33 
/*
 * Bookkeeping entry, stack-allocated by a thread while it is blocked
 * waiting for a ww_mutex.  All entries live on the global
 * ww_mutex_head list so that other waiters can scan for deadlock.
 */
struct ww_mutex_thread {
	TAILQ_ENTRY(ww_mutex_thread) entry;	/* linkage on ww_mutex_head */
	struct thread *thread;			/* the blocked thread */
	struct ww_mutex *lock;			/* the lock it is waiting for */
};
39 
/* Global list of threads currently blocked on some ww_mutex. */
static TAILQ_HEAD(, ww_mutex_thread) ww_mutex_head;
/* Protects ww_mutex_head and all ww_mutex ownership transitions. */
static struct mtx ww_mutex_global;
42 
/*
 * One-time initialization of the global ww_mutex waiter list and its
 * protecting mutex; runs early at boot via SYSINIT.
 */
static void
linux_ww_init(void *arg)
{
	TAILQ_INIT(&ww_mutex_head);
	mtx_init(&ww_mutex_global, "lkpi-ww-mtx", NULL, MTX_DEF);
}

SYSINIT(ww_init, SI_SUB_LOCK, SI_ORDER_SECOND, linux_ww_init, NULL);
51 
/*
 * Teardown counterpart of linux_ww_init(); destroys the global mutex
 * on module/system unload via SYSUNINIT.
 */
static void
linux_ww_uninit(void *arg)
{
	mtx_destroy(&ww_mutex_global);
}

SYSUNINIT(ww_uninit, SI_SUB_LOCK, SI_ORDER_SECOND, linux_ww_uninit, NULL);
59 
/* Acquire the global ww_mutex state lock. */
static inline void
linux_ww_lock(void)
{
	mtx_lock(&ww_mutex_global);
}
65 
/* Release the global ww_mutex state lock. */
static inline void
linux_ww_unlock(void)
{
	mtx_unlock(&ww_mutex_global);
}
71 
/*
 * Lock a ww_mutex with deadlock avoidance.
 *
 * The underlying sx lock is tried first; on contention the thread
 * registers itself on the global waiter list and sleeps on the lock's
 * condvar, rescanning for deadlock each time it wakes up.  All state
 * transitions happen under ww_mutex_global.
 *
 * Returns 0 on success, -EDEADLK if taking the lock would deadlock,
 * or -EINTR if catch_signal is set and the sleep was interrupted.
 * On -EINTR the original sleep error is stashed on the task via
 * linux_schedule_save_interrupt_value().
 */
int
linux_ww_mutex_lock_sub(struct ww_mutex *lock,
    struct ww_acquire_ctx *ctx, int catch_signal)
{
	struct task_struct *task;
	struct ww_mutex_thread entry;	/* stack-based waiter record */
	struct ww_mutex_thread *other;
	int retval = 0;

	task = current;

	linux_ww_lock();
	if (unlikely(sx_try_xlock(&lock->base.sx) == 0)) {
		/* contended: publish ourselves as a waiter on this lock */
		entry.thread = curthread;
		entry.lock = lock;
		TAILQ_INSERT_TAIL(&ww_mutex_head, &entry, entry);

		do {
			/* current owner of the lock we are waiting for */
			struct thread *owner = (struct thread *)
			    SX_OWNER(lock->base.sx.sx_lock);

			/* scan for deadlock */
			TAILQ_FOREACH(other, &ww_mutex_head, entry) {
				/* skip own thread */
				if (other == &entry)
					continue;
				/*
				 * If another thread is owning our
				 * lock and is at the same time trying
				 * to acquire a lock this thread owns,
				 * that means deadlock.
				 *
				 * NOTE(review): this detects direct
				 * two-thread cycles; longer ownership
				 * cycles appear not to be checked here.
				 */
				if (other->thread == owner &&
				    (struct thread *)SX_OWNER(
				    other->lock->base.sx.sx_lock) == curthread) {
					retval = -EDEADLK;
					goto done;
				}
			}
			if (catch_signal) {
				/* interruptible sleep; drops ww_mutex_global */
				retval = -cv_wait_sig(&lock->condvar, &ww_mutex_global);
				if (retval != 0) {
					/* remember real error, report EINTR */
					linux_schedule_save_interrupt_value(task, retval);
					retval = -EINTR;
					goto done;
				}
			} else {
				cv_wait(&lock->condvar, &ww_mutex_global);
			}
		} while (sx_try_xlock(&lock->base.sx) == 0);
done:
		TAILQ_REMOVE(&ww_mutex_head, &entry, entry);

		/* if the lock is free, wakeup next lock waiter, if any */
		if ((struct thread *)SX_OWNER(lock->base.sx.sx_lock) == NULL)
			cv_signal(&lock->condvar);
	}

	if (retval == 0)
		lock->ctx = ctx;
	linux_ww_unlock();
	return (retval);
}
136 
/*
 * Unlock a ww_mutex: clear the acquire context, drop the underlying
 * sx lock and wake one waiter, all under the global state lock so the
 * ownership change is atomic with respect to the deadlock scan in
 * linux_ww_mutex_lock_sub().
 */
void
linux_ww_mutex_unlock_sub(struct ww_mutex *lock)
{
	/* protect ww_mutex ownership change */
	linux_ww_lock();
	lock->ctx = NULL;
	sx_xunlock(&lock->base.sx);
	/* wakeup a lock waiter, if any */
	cv_signal(&lock->condvar);
	linux_ww_unlock();
}
148 
149 int
150 linux_mutex_lock_interruptible(mutex_t *m)
151 {
152 	int error;
153 
154 	error = -sx_xlock_sig(&m->sx);
155 	if (error != 0) {
156 		linux_schedule_save_interrupt_value(current, error);
157 		error = -EINTR;
158 	}
159 	return (error);
160 }
161 
162 int
163 linux_down_read_killable(struct rw_semaphore *rw)
164 {
165 	int error;
166 
167 	error = -sx_slock_sig(&rw->sx);
168 	if (error != 0) {
169 		linux_schedule_save_interrupt_value(current, error);
170 		error = -EINTR;
171 	}
172 	return (error);
173 }
174 
175 int
176 linux_down_write_killable(struct rw_semaphore *rw)
177 {
178 	int error;
179 
180 	error = -sx_xlock_sig(&rw->sx);
181 	if (error != 0) {
182 		linux_schedule_save_interrupt_value(current, error);
183 		error = -EINTR;
184 	}
185 	return (error);
186 }
187