xref: /freebsd/lib/libthr/thread/thr_spinlock.c (revision 6b3455a7665208c366849f0b2b3bc916fb97516e)
1 /*
2  * Copyright (c) 2004 Michael Telahun Makonnen <mtm@FreeBSD.Org>
3  * Copyright (c) 1997 John Birrell <jb@cimlogic.com.au>.
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  * 3. All advertising materials mentioning features or use of this software
15  *    must display the following acknowledgement:
16  *	This product includes software developed by John Birrell.
17  * 4. Neither the name of the author nor the names of any co-contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  * $FreeBSD$
34  *
35  */
36 
#include <sys/types.h>
#include <machine/atomic.h>

#include <errno.h>
#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <libc_private.h>

#include "thr_private.h"
50 
/* Value stored in s_magic to mark a properly initialized spinlock. */
#define THR_SPIN_MAGIC		0xdadadada
/* s_owner value meaning "no thread currently holds the lock". */
#define THR_SPIN_UNOWNED	(void *)0
/*
 * Validate a spinlock pointer: return EINVAL from the calling function
 * when it is NULL or does not carry the magic value (i.e. was never
 * initialized or has been destroyed).
 */
#define MAGIC_TEST_RETURN_ON_FAIL(l)					   \
	do {								   \
		if ((l) == NULL || (l)->s_magic != THR_SPIN_MAGIC)	   \
			return (EINVAL);				   \
	} while(0)
58 
59 __weak_reference(_pthread_spin_destroy, pthread_spin_destroy);
60 __weak_reference(_pthread_spin_init, pthread_spin_init);
61 __weak_reference(_pthread_spin_lock, pthread_spin_lock);
62 __weak_reference(_pthread_spin_trylock, pthread_spin_trylock);
63 __weak_reference(_pthread_spin_unlock, pthread_spin_unlock);
64 
65 int
66 _pthread_spin_destroy(pthread_spinlock_t *lock)
67 {
68 	MAGIC_TEST_RETURN_ON_FAIL((*lock));
69 	if ((*lock)->s_owner == THR_SPIN_UNOWNED) {
70 		(*lock)->s_magic = 0;
71 		free((*lock));
72 		*lock = NULL;
73 		return (0);
74 	}
75 	return (EBUSY);
76 }
77 
78 int
79 _pthread_spin_init(pthread_spinlock_t *lock, int pshared)
80 {
81 	struct pthread_spinlock *s;
82 
83 	s = (struct pthread_spinlock *)malloc(sizeof(struct pthread_spinlock));
84 	if (s == NULL)
85 		return (ENOMEM);
86 	s->s_magic = THR_SPIN_MAGIC;
87 	s->s_owner = THR_SPIN_UNOWNED;
88 	*lock = s;
89 	return (0);
90 }
91 
/*
 * Acquire the spinlock for the calling thread, spinning until it
 * becomes available.  Fails with EDEADLK if the calling thread
 * already owns the lock.  (This function takes no nonblocking flag;
 * use pthread_spin_trylock() for a nonblocking attempt.)
 */
97 int
98 _pthread_spin_lock(pthread_spinlock_t *lock)
99 {
100 	MAGIC_TEST_RETURN_ON_FAIL(*lock);
101 	if ((*lock)->s_owner == curthread)
102 		return (EDEADLK);
103         while (atomic_cmpset_acq_ptr(&(*lock)->s_owner, THR_SPIN_UNOWNED,
104             (void *)curthread) != 1)
105 		;	/* SPIN */
106 	return (0);
107 }
108 
109 int
110 _pthread_spin_trylock(pthread_spinlock_t *lock)
111 {
112 	MAGIC_TEST_RETURN_ON_FAIL(*lock);
113 	if (atomic_cmpset_acq_ptr(&(*lock)->s_owner, THR_SPIN_UNOWNED,
114 	    (void *)curthread) == 1)
115 		return (0);
116 	return (EBUSY);
117 }
118 
119 int
120 _pthread_spin_unlock(pthread_spinlock_t *lock)
121 {
122 	MAGIC_TEST_RETURN_ON_FAIL(*lock);
123 	if (atomic_cmpset_rel_ptr(&(*lock)->s_owner, (void *)curthread,
124 	    THR_SPIN_UNOWNED) == 1)
125 		return (0);
126 	return (EPERM);
127 }
128 
129 void
130 _spinunlock(spinlock_t *lck)
131 {
132 	if (umtx_unlock((struct umtx *)lck, curthread->thr_id))
133 		abort();
134 }
135 
/*
 * Lock a location for the running thread via the kernel umtx
 * primitive.  NOTE(review): unlike the historical userland spin
 * implementation this comment once described, umtx_lock() may block
 * the thread in the kernel when the lock is contested — confirm
 * against the umtx documentation.
 */
142 void
143 _spinlock(spinlock_t *lck)
144 {
145 	if (umtx_lock((struct umtx *)lck, curthread->thr_id))
146 		abort();
147 }
148 
/*
 * Try to acquire a libc spinlock.  Returns 0 on success or EBUSY if
 * the lock is held; any other umtx error is fatal.
 *
 * NOTE(review): this calls umtx_lock(), which can block on a
 * contested lock — a true trylock would use a non-blocking umtx
 * primitive.  Verify against the umtx API.
 */
int
_spintrylock(spinlock_t *lck)
{
	int error;

	error = umtx_lock((struct umtx *)lck, curthread->thr_id);
	if (error != 0 && error != EBUSY)
		abort();
	return (error);
}
159 
/*
 * Debug variant of _spinlock().  The caller's file name and line
 * number are accepted for diagnostics but are not used by this
 * implementation, which simply acquires the lock like _spinlock().
 * (The historical note about warning on recursive locking and
 * producing a thread dump describes behavior not implemented here.)
 */
170 void
171 _spinlock_debug(spinlock_t *lck, char *fname, int lineno)
172 {
173 	if (umtx_lock((struct umtx *)lck, curthread->thr_id))
174 		abort();
175 }
176