/*
 * Copyright (C) 2010 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <errno.h>
#include <machine/atomic.h>
#include <sys/umtx.h>
#include <limits.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <semaphore.h>
#include <unistd.h>
#include "un-namespace.h"

__weak_reference(_sem_close, sem_close);
__weak_reference(_sem_destroy, sem_destroy);
__weak_reference(_sem_getvalue, sem_getvalue);
__weak_reference(_sem_init, sem_init);
__weak_reference(_sem_open, sem_open);
__weak_reference(_sem_post, sem_post);
__weak_reference(_sem_timedwait, sem_timedwait);
__weak_reference(_sem_trywait, sem_trywait);
__weak_reference(_sem_unlink, sem_unlink);
__weak_reference(_sem_wait, sem_wait);

#define SEM_PREFIX	"/tmp/SEMD"
#define SEM_MAGIC	((u_int32_t)0x73656d31)

struct sem_nameinfo {
	int open_count;
	char *name;
	sem_t *sem;
	LIST_ENTRY(sem_nameinfo) next;
};

static pthread_once_t once = PTHREAD_ONCE_INIT;
static pthread_mutex_t sem_llock;
static LIST_HEAD(,sem_nameinfo) sem_list = LIST_HEAD_INITIALIZER(sem_list);

static void
sem_prefork()
{

	_pthread_mutex_lock(&sem_llock);
}

static void
sem_postfork()
{
	_pthread_mutex_unlock(&sem_llock);
}

static void
sem_child_postfork()
{
	_pthread_mutex_unlock(&sem_llock);
}

static void
sem_module_init(void)
{
	pthread_mutexattr_t ma;

	_pthread_mutexattr_init(&ma);
	_pthread_mutexattr_settype(&ma, PTHREAD_MUTEX_RECURSIVE);
	_pthread_mutex_init(&sem_llock, &ma);
	_pthread_mutexattr_destroy(&ma);
	_pthread_atfork(sem_prefork, sem_postfork, sem_child_postfork);
}

static inline int
sem_check_validity(sem_t *sem)
{

	if (sem->_magic == SEM_MAGIC)
		return (0);
	else {
		errno = EINVAL;
		return (-1);
	}
}

int
_sem_init(sem_t *sem, int pshared, unsigned int value)
{

	if (value > SEM_VALUE_MAX) {
		errno = EINVAL;
		return (-1);
	}

	bzero(sem, sizeof(sem_t));
	sem->_magic = SEM_MAGIC;
	sem->_kern._count = (u_int32_t)value;
	sem->_kern._has_waiters = 0;
	sem->_kern._flags = pshared ? USYNC_PROCESS_SHARED : 0;
	return (0);
}
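
/*
 * Named semaphores are backed by a file under SEM_PREFIX holding a
 * sem_t; the file is mmap()ed MAP_SHARED, so every process that opens
 * the same name operates on the same kernel-visible counter.  Within a
 * single process, repeated sem_open() calls for the same name are
 * reference counted through sem_list (protected by sem_llock) and
 * return the existing mapping.
 *
 * Illustrative use of the standard interface (example only, not part
 * of this file):
 *
 *	sem_t *s = sem_open("/mysem", O_CREAT, 0644, 1);
 *	if (s != SEM_FAILED) {
 *		sem_wait(s);
 *		...
 *		sem_post(s);
 *		sem_close(s);
 *	}
 */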

sem_t *
_sem_open(const char *name, int flags, ...)
{
	char path[PATH_MAX];

	struct stat sb;
	va_list ap;
	struct sem_nameinfo *ni = NULL;
	sem_t *sem = NULL;
	int fd = -1, mode, len, errsave;
	int value = 0;

	if (name[0] != '/') {
		errno = EINVAL;
		return (SEM_FAILED);
	}
	name++;

	if (flags & ~(O_CREAT|O_EXCL)) {
		errno = EINVAL;
		return (SEM_FAILED);
	}

	_pthread_once(&once, sem_module_init);

	_pthread_mutex_lock(&sem_llock);
	LIST_FOREACH(ni, &sem_list, next) {
		if (strcmp(name, ni->name) == 0) {
			ni->open_count++;
			sem = ni->sem;
			_pthread_mutex_unlock(&sem_llock);
			return (sem);
		}
	}

	if (flags & O_CREAT) {
		va_start(ap, flags);
		mode = va_arg(ap, int);
		value = va_arg(ap, int);
		va_end(ap);
	}

	len = sizeof(*ni) + strlen(name) + 1;
	ni = (struct sem_nameinfo *)malloc(len);
	if (ni == NULL) {
		errno = ENOSPC;
		goto error;
	}

	ni->name = (char *)(ni+1);
	strcpy(ni->name, name);

	strcpy(path, SEM_PREFIX);
	if (strlcat(path, name, sizeof(path)) >= sizeof(path)) {
		errno = ENAMETOOLONG;
		goto error;
	}

	fd = _open(path, flags|O_RDWR, mode);
	if (fd == -1)
		goto error;
	if (flock(fd, LOCK_EX) == -1)
		goto error;
	if (_fstat(fd, &sb)) {
		flock(fd, LOCK_UN);
		goto error;
	}
	if (sb.st_size < sizeof(sem_t)) {
		sem_t tmp;

		tmp._magic = SEM_MAGIC;
		tmp._kern._has_waiters = 0;
		tmp._kern._count = value;
		tmp._kern._flags = USYNC_PROCESS_SHARED | SEM_NAMED;
		if (_write(fd, &tmp, sizeof(tmp)) != sizeof(tmp)) {
			flock(fd, LOCK_UN);
			goto error;
		}
	}
	flock(fd, LOCK_UN);
	sem = (sem_t *)mmap(NULL, sizeof(sem_t), PROT_READ|PROT_WRITE,
	    MAP_SHARED|MAP_NOSYNC, fd, 0);
	if (sem == MAP_FAILED) {
		sem = NULL;
		if (errno == ENOMEM)
			errno = ENOSPC;
		goto error;
	}
	if (sem->_magic != SEM_MAGIC) {
		errno = EINVAL;
		goto error;
	}
	ni->open_count = 1;
	ni->sem = sem;
	LIST_INSERT_HEAD(&sem_list, ni, next);
	_pthread_mutex_unlock(&sem_llock);
	_close(fd);
	return (sem);

error:
	errsave = errno;
	_pthread_mutex_unlock(&sem_llock);
	if (fd != -1)
		_close(fd);
	if (sem != NULL)
		munmap(sem, sizeof(sem_t));
	free(ni);
	errno = errsave;
	return (SEM_FAILED);
}

int
_sem_close(sem_t *sem)
{
	struct sem_nameinfo *ni;

	if (sem_check_validity(sem) != 0)
		return (-1);

	if (!(sem->_kern._flags & SEM_NAMED)) {
		errno = EINVAL;
		return (-1);
	}

	_pthread_once(&once, sem_module_init);

	_pthread_mutex_lock(&sem_llock);
	LIST_FOREACH(ni, &sem_list, next) {
		if (sem == ni->sem) {
			if (--ni->open_count > 0) {
				_pthread_mutex_unlock(&sem_llock);
				return (0);
			}
			else
				break;
		}
	}

	if (ni) {
		LIST_REMOVE(ni, next);
		_pthread_mutex_unlock(&sem_llock);
		munmap(sem, sizeof(*sem));
		free(ni);
		return (0);
	}
	_pthread_mutex_unlock(&sem_llock);
	errno = EINVAL;
	return (-1);
}
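
/*
 * sem_unlink() only removes the backing file under SEM_PREFIX;
 * processes that still have the semaphore open keep using their
 * existing mapping until the final sem_close() unmaps it, matching the
 * POSIX requirement that unlinking a name not affect open references.
 */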

int
_sem_unlink(const char *name)
{
	char path[PATH_MAX];

	if (name[0] != '/') {
		errno = ENOENT;
		return (-1);
	}
	name++;

	strcpy(path, SEM_PREFIX);
	if (strlcat(path, name, sizeof(path)) >= sizeof(path)) {
		errno = ENAMETOOLONG;
		return (-1);
	}
	return unlink(path);
}

int
_sem_destroy(sem_t *sem)
{

	if (sem_check_validity(sem) != 0)
		return (-1);

	if (sem->_kern._flags & SEM_NAMED) {
		errno = EINVAL;
		return (-1);
	}
	sem->_magic = 0;
	return (0);
}

int
_sem_getvalue(sem_t * __restrict sem, int * __restrict sval)
{

	if (sem_check_validity(sem) != 0)
		return (-1);

	*sval = (int)sem->_kern._count;
	return (0);
}

static __inline int
usem_wake(struct _usem *sem)
{
	if (!sem->_has_waiters)
		return (0);
	return _umtx_op(sem, UMTX_OP_SEM_WAKE, 0, NULL, NULL);
}

static __inline int
usem_wait(struct _usem *sem, const struct timespec *timeout)
{
	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0))) {
		errno = ETIMEDOUT;
		return (-1);
	}
	return _umtx_op(sem, UMTX_OP_SEM_WAIT, 0, NULL,
	    __DECONST(void*, timeout));
}

int
_sem_trywait(sem_t *sem)
{
	int val;

	if (sem_check_validity(sem) != 0)
		return (-1);

	while ((val = sem->_kern._count) > 0) {
		if (atomic_cmpset_acq_int(&sem->_kern._count, val, val - 1))
			return (0);
	}
	errno = EAGAIN;
	return (-1);
}

static void
sem_cancel_handler(void *arg)
{
	sem_t *sem = arg;

	if (sem->_kern._has_waiters && sem->_kern._count)
		usem_wake(&sem->_kern);
}

#define TIMESPEC_SUB(dst, src, val)					\
	do {								\
		(dst)->tv_sec = (src)->tv_sec - (val)->tv_sec;		\
		(dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec;	\
		if ((dst)->tv_nsec < 0) {				\
			(dst)->tv_sec--;				\
			(dst)->tv_nsec += 1000000000;			\
		}							\
	} while (0)

static __inline int
enable_async_cancel(void)
{
	int old;

	_pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
	return (old);
}

static __inline void
restore_async_cancel(int val)
{
	_pthread_setcanceltype(val, NULL);
}
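
/*
 * sem_timedwait() below first tries to take a count with a lock-free
 * compare-and-swap; only when the count is zero does it enter the
 * kernel via UMTX_OP_SEM_WAIT, converting the absolute abstime into
 * the relative timeout the umtx operation expects.  The kernel wait
 * runs with asynchronous cancellation enabled and a cleanup handler
 * that wakes another waiter, so a post that races with cancellation is
 * handed off rather than lost.
 */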

int
_sem_timedwait(sem_t * __restrict sem,
	const struct timespec * __restrict abstime)
{
	struct timespec ts, ts2;
	int val, retval, saved_cancel;

	if (sem_check_validity(sem) != 0)
		return (-1);

	retval = 0;
	for (;;) {
		while ((val = sem->_kern._count) > 0) {
			if (atomic_cmpset_acq_int(&sem->_kern._count, val, val - 1))
				return (0);
		}

		if (retval)
			break;

		/*
		 * The timeout argument is only supposed to
		 * be checked if the thread would have blocked.
		 */
		if (abstime != NULL) {
			if (abstime->tv_nsec >= 1000000000 || abstime->tv_nsec < 0) {
				errno = EINVAL;
				return (-1);
			}
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
		}
		pthread_cleanup_push(sem_cancel_handler, sem);
		saved_cancel = enable_async_cancel();
		retval = usem_wait(&sem->_kern, abstime ? &ts2 : NULL);
		restore_async_cancel(saved_cancel);
		pthread_cleanup_pop(0);
	}
	return (retval);
}

int
_sem_wait(sem_t *sem)
{
	return _sem_timedwait(sem, NULL);
}

/*
 * POSIX:
 * The sem_post() interface is reentrant with respect to signals and may be
 * invoked from a signal-catching function.
 * The implementation does not use a lock, so it is safe to call from a
 * signal handler.
 */
int
_sem_post(sem_t *sem)
{

	if (sem_check_validity(sem) != 0)
		return (-1);

	atomic_add_rel_int(&sem->_kern._count, 1);
	return usem_wake(&sem->_kern);
}