/*
 * Copyright (c) 2004 Michael Telahun Makonnen <mtm@FreeBSD.Org>
 * Copyright (c) 1997 John Birrell <jb@cimlogic.com.au>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */

#include <sys/types.h>
#include <machine/atomic.h>

#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sched.h>
#include <pthread.h>
#include <unistd.h>

#include <libc_private.h>

#include "thr_private.h"

#define	THR_SPIN_MAGIC		0xdadadada
#define	THR_SPIN_UNOWNED	(void *)0
#define	MAGIC_TEST_RETURN_ON_FAIL(l)					\
	do {								\
		if ((l) == NULL || (l)->s_magic != THR_SPIN_MAGIC)	\
			return (EINVAL);				\
	} while (0)

__weak_reference(_pthread_spin_destroy, pthread_spin_destroy);
__weak_reference(_pthread_spin_init, pthread_spin_init);
__weak_reference(_pthread_spin_lock, pthread_spin_lock);
__weak_reference(_pthread_spin_trylock, pthread_spin_trylock);
__weak_reference(_pthread_spin_unlock, pthread_spin_unlock);

int
_pthread_spin_destroy(pthread_spinlock_t *lock)
{
	MAGIC_TEST_RETURN_ON_FAIL((*lock));
	if ((*lock)->s_owner == THR_SPIN_UNOWNED) {
		(*lock)->s_magic = 0;
		free((*lock));
		*lock = NULL;
		return (0);
	}
	return (EBUSY);
}

int
_pthread_spin_init(pthread_spinlock_t *lock, int pshared)
{
	struct pthread_spinlock	*s;

	if (*lock != NULL) {
		if ((*lock)->s_magic == THR_SPIN_MAGIC)
			return (EBUSY);
	}
	s = (struct pthread_spinlock *)malloc(sizeof(struct pthread_spinlock));
	if (s == NULL)
		return (ENOMEM);
	s->s_magic = THR_SPIN_MAGIC;
	s->s_owner = THR_SPIN_UNOWNED;
	*lock = s;
	return (0);
}

/*
 * _pthread_spin_lock() keeps spinning until it acquires the lock and
 * returns EDEADLK if the calling thread already owns it.  The
 * non-blocking variant, _pthread_spin_trylock(), instead returns EBUSY
 * immediately when the lock is already held.
 */
int
_pthread_spin_lock(pthread_spinlock_t *lock)
{
	MAGIC_TEST_RETURN_ON_FAIL(*lock);
	if ((*lock)->s_owner == curthread)
		return (EDEADLK);
	while (atomic_cmpset_acq_ptr(&(*lock)->s_owner, THR_SPIN_UNOWNED,
	    (void *)curthread) != 1)
		;	/* SPIN */
	return (0);
}

int
_pthread_spin_trylock(pthread_spinlock_t *lock)
{
	MAGIC_TEST_RETURN_ON_FAIL(*lock);
	if (atomic_cmpset_acq_ptr(&(*lock)->s_owner, THR_SPIN_UNOWNED,
	    (void *)curthread) == 1)
		return (0);
	return (EBUSY);
}

int
_pthread_spin_unlock(pthread_spinlock_t *lock)
{
	MAGIC_TEST_RETURN_ON_FAIL(*lock);
	if (atomic_cmpset_rel_ptr(&(*lock)->s_owner, (void *)curthread,
	    THR_SPIN_UNOWNED) == 1)
		return (0);
	return (EPERM);
}

void
_spinunlock(spinlock_t *lck)
{
	if (umtx_unlock((struct umtx *)lck, curthread->thr_id))
		abort();
}

/*
 * Lock a location for the running thread. Yield to allow other
 * threads to run if this thread is blocked because the lock is
 * not available. Note that this function does not sleep. It
 * assumes that the lock will be available very soon.
 */
void
_spinlock(spinlock_t *lck)
{
	if (umtx_lock((struct umtx *)lck, curthread->thr_id))
		abort();
}

int
_spintrylock(spinlock_t *lck)
{
	int	error;

	error = umtx_lock((struct umtx *)lck, curthread->thr_id);
	if (error != 0 && error != EBUSY)
		abort();
	return (error);
}

/*
 * Lock a location for the running thread. Yield to allow other
 * threads to run if this thread is blocked because the lock is
 * not available. Note that this function does not sleep. It
 * assumes that the lock will be available very soon.
 *
 * This function checks if the running thread has already locked the
 * location, warns if this occurs and creates a thread dump before
 * returning.
 */
void
_spinlock_debug(spinlock_t *lck, char *fname, int lineno)
{
	if (umtx_lock((struct umtx *)lck, curthread->thr_id))
		abort();
}
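
/*
 * Example usage (not part of libthr): a minimal sketch of how the
 * pthread_spin_* interface implemented above is typically driven.  It is
 * compiled only when SPINLOCK_EXAMPLE is defined, so it does not affect
 * the library build.  The guard macro, the worker function, the counter
 * and the iteration counts below are illustrative assumptions, not names
 * or values taken from this file.
 */
#ifdef SPINLOCK_EXAMPLE
#include <err.h>

#define	EXAMPLE_NTHREADS	4

static pthread_spinlock_t example_lock;	/* NULL until pthread_spin_init() */
static int example_counter;		/* shared state the lock protects */

static void *
example_worker(void *arg)
{
	int error, i;

	(void)arg;
	for (i = 0; i < 100000; i++) {
		/* Spins until the lock is acquired. */
		if ((error = pthread_spin_lock(&example_lock)) != 0)
			errx(1, "pthread_spin_lock: %s", strerror(error));
		example_counter++;
		if ((error = pthread_spin_unlock(&example_lock)) != 0)
			errx(1, "pthread_spin_unlock: %s", strerror(error));
	}
	return (NULL);
}

int
main(void)
{
	pthread_t threads[EXAMPLE_NTHREADS];
	int error, i;

	/* The pshared argument is accepted but ignored by this implementation. */
	if ((error = pthread_spin_init(&example_lock,
	    PTHREAD_PROCESS_PRIVATE)) != 0)
		errx(1, "pthread_spin_init: %s", strerror(error));
	for (i = 0; i < EXAMPLE_NTHREADS; i++)
		if ((error = pthread_create(&threads[i], NULL,
		    example_worker, NULL)) != 0)
			errx(1, "pthread_create: %s", strerror(error));
	for (i = 0; i < EXAMPLE_NTHREADS; i++)
		pthread_join(threads[i], NULL);
	/* Destroying an unowned lock succeeds; EBUSY is returned otherwise. */
	if ((error = pthread_spin_destroy(&example_lock)) != 0)
		errx(1, "pthread_spin_destroy: %s", strerror(error));
	printf("counter = %d (expected %d)\n", example_counter,
	    EXAMPLE_NTHREADS * 100000);
	return (0);
}
#endif /* SPINLOCK_EXAMPLE */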