xref: /freebsd/sys/compat/linuxkpi/common/src/linux_lock.c (revision e3514747256465c52c3b2aedc9795f52c0d3efe9)
1 /*-
2  * Copyright (c) 2017 Mellanox Technologies, Ltd.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice unmodified, this list of conditions, and the following
10  *    disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  *
26  * $FreeBSD$
27  */
28 
29 #include <sys/queue.h>
30 
31 #include <linux/ww_mutex.h>
32 
/*
 * Per-waiter record for a contended ww_mutex.  An instance lives on the
 * stack of the thread sleeping in linux_ww_mutex_lock_sub() and is linked
 * onto the global ww_mutex_head list for the duration of the wait, so
 * that other waiters can scan for cyclic lock dependencies.
 */
struct ww_mutex_thread {
	TAILQ_ENTRY(ww_mutex_thread) entry;	/* linkage on ww_mutex_head */
	struct thread *thread;			/* the waiting thread (curthread) */
	struct ww_mutex *lock;			/* the ww_mutex being waited for */
};
38 
/*
 * Global list of threads currently blocked on some ww_mutex, used for
 * deadlock detection.  Both the list and the per-lock ownership
 * transitions are serialized by ww_mutex_global.
 */
static TAILQ_HEAD(, ww_mutex_thread) ww_mutex_head;
static struct mtx ww_mutex_global;	/* protects ww_mutex_head and cv waits */
41 
42 static void
43 linux_ww_init(void *arg)
44 {
45 	TAILQ_INIT(&ww_mutex_head);
46 	mtx_init(&ww_mutex_global, "lkpi-ww-mtx", NULL, MTX_DEF);
47 }
48 
49 SYSINIT(ww_init, SI_SUB_LOCK, SI_ORDER_SECOND, linux_ww_init, NULL);
50 
/*
 * Teardown counterpart of linux_ww_init(): destroy the global
 * serialization mutex.  The waiter list needs no cleanup; by this
 * SYSUNINIT stage no thread may still be registered on it.
 */
static void
linux_ww_uninit(void *arg)
{
	mtx_destroy(&ww_mutex_global);
}

SYSUNINIT(ww_uninit, SI_SUB_LOCK, SI_ORDER_SECOND, linux_ww_uninit, NULL);
58 
/* Acquire the global mutex guarding ww_mutex_head and ownership changes. */
static inline void
linux_ww_lock(void)
{
	mtx_lock(&ww_mutex_global);
}
64 
/* Release the global mutex taken by linux_ww_lock(). */
static inline void
linux_ww_unlock(void)
{
	mtx_unlock(&ww_mutex_global);
}
70 
/* lock a mutex with deadlock avoidance */
/*
 * Acquire 'lock' (an sx lock underneath), sleeping if it is contended.
 * While sleeping, the thread is registered on the global waiter list so
 * concurrent lockers can detect a two-way ownership cycle and back off.
 *
 * Returns 0 on success, -EDEADLK if taking the lock would deadlock
 * against another waiter, or -EINTR if 'catch_signal' is set and the
 * sleep was interrupted by a signal.  On any non-zero return the lock
 * is NOT held by the caller.
 *
 * Note the ordering invariants: the waiter is enqueued, the deadlock
 * scan runs, and cv_wait*() atomically drops ww_mutex_global — all
 * under the global mutex, so a wakeup between the failed sx_try_xlock()
 * and the sleep cannot be lost.
 */
int
linux_ww_mutex_lock_sub(struct ww_mutex *lock, int catch_signal)
{
	struct ww_mutex_thread entry;	/* stack-allocated waiter record */
	struct ww_mutex_thread *other;
	int retval = 0;

	linux_ww_lock();
	if (unlikely(sx_try_xlock(&lock->base.sx) == 0)) {
		/* Contended: publish ourselves as a waiter for 'lock'. */
		entry.thread = curthread;
		entry.lock = lock;
		TAILQ_INSERT_TAIL(&ww_mutex_head, &entry, entry);

		do {
			/* Current owner of the contended sx lock. */
			struct thread *owner = (struct thread *)
			    SX_OWNER(lock->base.sx.sx_lock);

			/* scan for deadlock */
			TAILQ_FOREACH(other, &ww_mutex_head, entry) {
				/* skip own thread */
				if (other == &entry)
					continue;
				/*
				 * If another thread is owning our
				 * lock and is at the same time trying
				 * to acquire a lock this thread owns,
				 * that means deadlock.
				 */
				if (other->thread == owner &&
				    (struct thread *)SX_OWNER(
				    other->lock->base.sx.sx_lock) == curthread) {
					retval = -EDEADLK;
					goto done;
				}
			}
			if (catch_signal) {
				/* Interruptible sleep; cv_wait_sig drops
				 * and reacquires ww_mutex_global. */
				if (cv_wait_sig(&lock->condvar, &ww_mutex_global) != 0) {
					retval = -EINTR;
					goto done;
				}
			} else {
				cv_wait(&lock->condvar, &ww_mutex_global);
			}
		} while (sx_try_xlock(&lock->base.sx) == 0);
done:
		/* Deregister whether we got the lock or are bailing out. */
		TAILQ_REMOVE(&ww_mutex_head, &entry, entry);

		/* if the lock is free, wakeup next lock waiter, if any */
		if ((struct thread *)SX_OWNER(lock->base.sx.sx_lock) == NULL)
			cv_signal(&lock->condvar);
	}
	linux_ww_unlock();
	return (retval);
}
126 
/*
 * Release 'lock' and wake one thread sleeping in
 * linux_ww_mutex_lock_sub(), if any.  The release and the signal are
 * both performed under the global mutex so the ownership change is
 * atomic with respect to the deadlock scan in the lock path.
 */
void
linux_ww_mutex_unlock_sub(struct ww_mutex *lock)
{
	/* protect ww_mutex ownership change */
	linux_ww_lock();
	sx_xunlock(&lock->base.sx);
	/* wakeup a lock waiter, if any */
	cv_signal(&lock->condvar);
	linux_ww_unlock();
}
137