/*
 * Copyright (C) 2010 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <errno.h>
#include <machine/atomic.h>
#include <sys/umtx.h>
#include <limits.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <semaphore.h>
#include <unistd.h>
#include "un-namespace.h"
#include "libc_private.h"

__weak_reference(_sem_close, sem_close);
__weak_reference(_sem_destroy, sem_destroy);
__weak_reference(_sem_getvalue, sem_getvalue);
__weak_reference(_sem_init, sem_init);
__weak_reference(_sem_open, sem_open);
__weak_reference(_sem_post, sem_post);
__weak_reference(_sem_timedwait, sem_timedwait);
__weak_reference(_sem_trywait, sem_trywait);
__weak_reference(_sem_unlink, sem_unlink);
__weak_reference(_sem_wait, sem_wait);

#define SEM_PREFIX	"/tmp/SEMD"
#define SEM_MAGIC	((u_int32_t)0x73656d31)

struct sem_nameinfo {
	int open_count;
	char *name;
	sem_t *sem;
	LIST_ENTRY(sem_nameinfo) next;
};

static pthread_once_t once = PTHREAD_ONCE_INIT;
static pthread_mutex_t sem_llock;
static LIST_HEAD(,sem_nameinfo) sem_list = LIST_HEAD_INITIALIZER(sem_list);

static void
sem_prefork()
{

	_pthread_mutex_lock(&sem_llock);
}

static void
sem_postfork()
{
	_pthread_mutex_unlock(&sem_llock);
}

static void
sem_child_postfork()
{
	_pthread_mutex_unlock(&sem_llock);
}

static void
sem_module_init(void)
{
	pthread_mutexattr_t ma;

	_pthread_mutexattr_init(&ma);
	_pthread_mutexattr_settype(&ma, PTHREAD_MUTEX_RECURSIVE);
	_pthread_mutex_init(&sem_llock, &ma);
	_pthread_mutexattr_destroy(&ma);
	_pthread_atfork(sem_prefork, sem_postfork, sem_child_postfork);
}

static inline int
sem_check_validity(sem_t *sem)
{

	if (sem->_magic == SEM_MAGIC)
		return (0);
	else {
		errno = EINVAL;
		return (-1);
	}
}

int
_sem_init(sem_t *sem, int pshared, unsigned int value)
{

	if (value > SEM_VALUE_MAX) {
		errno = EINVAL;
		return (-1);
	}

	bzero(sem, sizeof(sem_t));
	sem->_magic = SEM_MAGIC;
	sem->_kern._count = (u_int32_t)value;
	sem->_kern._has_waiters = 0;
	sem->_kern._flags = pshared ? USYNC_PROCESS_SHARED : 0;
	return (0);
}

sem_t *
_sem_open(const char *name, int flags, ...)
{
	char path[PATH_MAX];

	struct stat sb;
	va_list ap;
	struct sem_nameinfo *ni = NULL;
	sem_t *sem = NULL;
	int fd = -1, mode, len, errsave;
	int value = 0;

	if (name[0] != '/') {
		errno = EINVAL;
		return (SEM_FAILED);
	}
	name++;

	if (flags & ~(O_CREAT|O_EXCL)) {
		errno = EINVAL;
		return (SEM_FAILED);
	}

	_pthread_once(&once, sem_module_init);

	_pthread_mutex_lock(&sem_llock);
	/* Reuse the mapping if this process already has the name open. */
	LIST_FOREACH(ni, &sem_list, next) {
		if (strcmp(name, ni->name) == 0) {
			ni->open_count++;
			sem = ni->sem;
			_pthread_mutex_unlock(&sem_llock);
			return (sem);
		}
	}

	if (flags & O_CREAT) {
		va_start(ap, flags);
		mode = va_arg(ap, int);
		value = va_arg(ap, int);
		va_end(ap);
	}

	len = sizeof(*ni) + strlen(name) + 1;
	ni = (struct sem_nameinfo *)malloc(len);
	if (ni == NULL) {
		errno = ENOSPC;
		goto error;
	}

	ni->name = (char *)(ni+1);
	strcpy(ni->name, name);

	strcpy(path, SEM_PREFIX);
	if (strlcat(path, name, sizeof(path)) >= sizeof(path)) {
		errno = ENAMETOOLONG;
		goto error;
	}

	fd = _open(path, flags|O_RDWR, mode);
	if (fd == -1)
		goto error;
	if (flock(fd, LOCK_EX) == -1)
		goto error;
	if (_fstat(fd, &sb)) {
		flock(fd, LOCK_UN);
		goto error;
	}
	/* A newly created (short) file needs a valid semaphore image. */
	if (sb.st_size < sizeof(sem_t)) {
		sem_t tmp;

		tmp._magic = SEM_MAGIC;
		tmp._kern._has_waiters = 0;
		tmp._kern._count = value;
		tmp._kern._flags = USYNC_PROCESS_SHARED | SEM_NAMED;
		if (_write(fd, &tmp, sizeof(tmp)) != sizeof(tmp)) {
			flock(fd, LOCK_UN);
			goto error;
		}
	}
	flock(fd, LOCK_UN);
	sem = (sem_t *)mmap(NULL, sizeof(sem_t), PROT_READ|PROT_WRITE,
	    MAP_SHARED|MAP_NOSYNC, fd, 0);
	if (sem == MAP_FAILED) {
		sem = NULL;
		if (errno == ENOMEM)
			errno = ENOSPC;
		goto error;
	}
	if (sem->_magic != SEM_MAGIC) {
		errno = EINVAL;
		goto error;
	}
	ni->open_count = 1;
	ni->sem = sem;
	LIST_INSERT_HEAD(&sem_list, ni, next);
	_pthread_mutex_unlock(&sem_llock);
	_close(fd);
	return (sem);

error:
	errsave = errno;
	_pthread_mutex_unlock(&sem_llock);
	if (fd != -1)
		_close(fd);
	if (sem != NULL)
		munmap(sem, sizeof(sem_t));
	free(ni);
	errno = errsave;
	return (SEM_FAILED);
}

int
_sem_close(sem_t *sem)
{
	struct sem_nameinfo *ni;

	if (sem_check_validity(sem) != 0)
		return (-1);

	if (!(sem->_kern._flags & SEM_NAMED)) {
		errno = EINVAL;
		return (-1);
	}

	_pthread_once(&once, sem_module_init);

	_pthread_mutex_lock(&sem_llock);
	LIST_FOREACH(ni, &sem_list, next) {
		if (sem == ni->sem) {
			if (--ni->open_count > 0) {
				_pthread_mutex_unlock(&sem_llock);
				return (0);
			}
			else
				break;
		}
	}

	if (ni) {
		LIST_REMOVE(ni, next);
		_pthread_mutex_unlock(&sem_llock);
		munmap(sem, sizeof(*sem));
		free(ni);
		return (0);
	}
	_pthread_mutex_unlock(&sem_llock);
	errno = EINVAL;
	return (-1);
}

int
_sem_unlink(const char *name)
{
	char path[PATH_MAX];

	if (name[0] != '/') {
		errno = ENOENT;
		return -1;
	}
	name++;

	strcpy(path, SEM_PREFIX);
	if (strlcat(path, name, sizeof(path)) >= sizeof(path)) {
		errno = ENAMETOOLONG;
		return (-1);
	}
	return unlink(path);
}

int
_sem_destroy(sem_t *sem)
{

	if (sem_check_validity(sem) != 0)
		return (-1);

	if (sem->_kern._flags & SEM_NAMED) {
		errno = EINVAL;
		return (-1);
	}
	sem->_magic = 0;
	return (0);
}

int
_sem_getvalue(sem_t * __restrict sem, int * __restrict sval)
{

	if (sem_check_validity(sem) != 0)
		return (-1);

	*sval = (int)sem->_kern._count;
	return (0);
}

static __inline int
usem_wake(struct _usem *sem)
{
	/* Avoid the system call when no thread is waiting. */
	if (!sem->_has_waiters)
		return (0);
	return _umtx_op(sem, UMTX_OP_SEM_WAKE, 0, NULL, NULL);
}

static __inline int
usem_wait(struct _usem *sem, const struct timespec *timeout)
{
	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0))) {
		errno = ETIMEDOUT;
		return (-1);
	}
	return _umtx_op(sem, UMTX_OP_SEM_WAIT, 0, NULL,
	    __DECONST(void*, timeout));
}

int
_sem_trywait(sem_t *sem)
{
	int val;

	if (sem_check_validity(sem) != 0)
		return (-1);

	/* Try to atomically decrement a positive count without blocking. */
	while ((val = sem->_kern._count) > 0) {
		if (atomic_cmpset_acq_int(&sem->_kern._count, val, val - 1))
			return (0);
	}
	errno = EAGAIN;
	return (-1);
}

#define TIMESPEC_SUB(dst, src, val)					\
	do {								\
		(dst)->tv_sec = (src)->tv_sec - (val)->tv_sec;		\
		(dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec;	\
		if ((dst)->tv_nsec < 0) {				\
			(dst)->tv_sec--;				\
			(dst)->tv_nsec += 1000000000;			\
		}							\
	} while (0)


int
_sem_timedwait(sem_t * __restrict sem,
	const struct timespec * __restrict abstime)
{
	struct timespec ts, ts2;
	int val, retval;

	if (sem_check_validity(sem) != 0)
		return (-1);

	retval = 0;
	for (;;) {
		while ((val = sem->_kern._count) > 0) {
			if (atomic_cmpset_acq_int(&sem->_kern._count, val, val - 1))
				return (0);
		}

		if (retval) {
			_pthread_testcancel();
			break;
		}

		/*
		 * The timeout argument is only supposed to
		 * be checked if the thread would have blocked.
		 */
		if (abstime != NULL) {
			if (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0) {
				errno = EINVAL;
				return (-1);
			}
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
		}
		_pthread_cancel_enter(1);
		retval = usem_wait(&sem->_kern, abstime ? &ts2 : NULL);
		_pthread_cancel_leave(0);
	}
	return (retval);
}

int
_sem_wait(sem_t *sem)
{
	return _sem_timedwait(sem, NULL);
}

/*
 * POSIX:
 * The sem_post() interface is reentrant with respect to signals and may be
 * invoked from a signal-catching function.
 * The implementation does not use a lock, so it should be safe.
 */
int
_sem_post(sem_t *sem)
{

	if (sem_check_validity(sem) != 0)
		return (-1);

	atomic_add_rel_int(&sem->_kern._count, 1);
	return usem_wake(&sem->_kern);
}
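/*
 * Illustrative sketch of the property noted above _sem_post(): sem_post()
 * may be invoked from a signal-catching function.  The standalone program
 * below is an assumed example, not part of the library, and the semaphore
 * name "/example_sem" is hypothetical.  A SIGALRM handler posts the
 * semaphore while the main thread blocks in sem_wait().
 */
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <semaphore.h>

static sem_t *waitsem;

static void
alarm_handler(int sig)
{

	(void)sig;
	/* Safe in a signal handler: sem_post() takes no lock. */
	sem_post(waitsem);
}

int
main(void)
{

	/* Create (or open) the named semaphore with an initial count of 0. */
	waitsem = sem_open("/example_sem", O_CREAT, 0600, 0);
	if (waitsem == SEM_FAILED) {
		perror("sem_open");
		exit(1);
	}
	signal(SIGALRM, alarm_handler);
	alarm(1);

	/* Block until the handler posts; retry if interrupted early. */
	while (sem_wait(waitsem) == -1) {
		if (errno != EINTR) {
			perror("sem_wait");
			break;
		}
	}
	printf("woken by sem_post() from the signal handler\n");

	sem_close(waitsem);
	sem_unlink("/example_sem");
	return (0);
}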