/*
 * Copyright (C) 2010 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/file.h>	/* flock() and LOCK_* */
#include <errno.h>
#include <machine/atomic.h>
#include <sys/umtx.h>
#include <limits.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <semaphore.h>
#include <unistd.h>
#include "un-namespace.h"

__weak_reference(_sem_close, sem_close);
__weak_reference(_sem_destroy, sem_destroy);
__weak_reference(_sem_getvalue, sem_getvalue);
__weak_reference(_sem_init, sem_init);
__weak_reference(_sem_open, sem_open);
__weak_reference(_sem_post, sem_post);
__weak_reference(_sem_timedwait, sem_timedwait);
__weak_reference(_sem_trywait, sem_trywait);
__weak_reference(_sem_unlink, sem_unlink);
__weak_reference(_sem_wait, sem_wait);

#define	SEM_PREFIX	"/tmp/SEMD"
#define	SEM_MAGIC	((u_int32_t)0x73656d31)

struct sem_nameinfo {
	int open_count;
	char *name;
	sem_t *sem;
	LIST_ENTRY(sem_nameinfo) next;
};

static pthread_once_t once = PTHREAD_ONCE_INIT;
static pthread_mutex_t sem_llock;
static LIST_HEAD(, sem_nameinfo) sem_list = LIST_HEAD_INITIALIZER(sem_list);

static void
sem_prefork(void)
{

	_pthread_mutex_lock(&sem_llock);
}

static void
sem_postfork(void)
{

	_pthread_mutex_unlock(&sem_llock);
}

static void
sem_child_postfork(void)
{

	_pthread_mutex_unlock(&sem_llock);
}

static void
sem_module_init(void)
{
	pthread_mutexattr_t ma;

	_pthread_mutexattr_init(&ma);
	_pthread_mutexattr_settype(&ma, PTHREAD_MUTEX_RECURSIVE);
	_pthread_mutex_init(&sem_llock, &ma);
	_pthread_mutexattr_destroy(&ma);
	_pthread_atfork(sem_prefork, sem_postfork, sem_child_postfork);
}
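
/*
 * Verify that the semaphore was initialized by sem_init() or sem_open();
 * sem_destroy() clears the magic number.
 */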
static inline int
sem_check_validity(sem_t *sem)
{

	if (sem->_magic == SEM_MAGIC)
		return (0);
	else {
		errno = EINVAL;
		return (-1);
	}
}

int
_sem_init(sem_t *sem, int pshared, unsigned int value)
{

	if (value > SEM_VALUE_MAX) {
		errno = EINVAL;
		return (-1);
	}

	bzero(sem, sizeof(sem_t));
	sem->_magic = SEM_MAGIC;
	sem->_kern._count = (u_int32_t)value;
	sem->_kern._has_waiters = 0;
	sem->_kern._flags = pshared ? USYNC_PROCESS_SHARED : 0;
	return (0);
}

sem_t *
_sem_open(const char *name, int flags, ...)
{
	char path[PATH_MAX];
	struct stat sb;
	va_list ap;
	struct sem_nameinfo *ni = NULL;
	sem_t *sem = NULL;
	int fd = -1, mode = 0, len;
	int value = 0;

	if (name[0] != '/') {
		errno = EINVAL;
		return (SEM_FAILED);
	}
	name++;

	if (flags & ~(O_CREAT|O_EXCL)) {
		errno = EINVAL;
		return (SEM_FAILED);
	}

	_pthread_once(&once, sem_module_init);

	_pthread_mutex_lock(&sem_llock);
	LIST_FOREACH(ni, &sem_list, next) {
		if (strcmp(name, ni->name) == 0) {
			/*
			 * POSIX requires O_CREAT|O_EXCL to fail with
			 * EEXIST when the semaphore already exists.
			 */
			if ((flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL)) {
				_pthread_mutex_unlock(&sem_llock);
				errno = EEXIST;
				return (SEM_FAILED);
			}
			ni->open_count++;
			sem = ni->sem;
			_pthread_mutex_unlock(&sem_llock);
			return (sem);
		}
	}

	if (flags & O_CREAT) {
		va_start(ap, flags);
		mode = va_arg(ap, int);
		value = va_arg(ap, int);
		va_end(ap);
	}

	len = sizeof(*ni) + strlen(name) + 1;
	ni = (struct sem_nameinfo *)malloc(len);
	if (ni == NULL) {
		errno = ENOSPC;
		goto error;
	}

	ni->name = (char *)(ni + 1);
	strcpy(ni->name, name);

	strcpy(path, SEM_PREFIX);
	if (strlcat(path, name, sizeof(path)) >= sizeof(path)) {
		errno = ENAMETOOLONG;
		goto error;
	}

	fd = _open(path, flags|O_RDWR, mode);
	if (fd == -1)
		goto error;
	if (flock(fd, LOCK_EX) == -1)
		goto error;
	if (_fstat(fd, &sb)) {
		flock(fd, LOCK_UN);
		goto error;
	}
	if (sb.st_size < (off_t)sizeof(sem_t)) {
		sem_t tmp;

		tmp._magic = SEM_MAGIC;
		tmp._kern._has_waiters = 0;
		tmp._kern._count = value;
		tmp._kern._flags = USYNC_PROCESS_SHARED | SEM_NAMED;
		if (_write(fd, &tmp, sizeof(tmp)) != sizeof(tmp)) {
			flock(fd, LOCK_UN);
			goto error;
		}
	}
	flock(fd, LOCK_UN);
	sem = (sem_t *)mmap(NULL, sizeof(sem_t), PROT_READ|PROT_WRITE,
	    MAP_SHARED|MAP_NOSYNC, fd, 0);
	if (sem == MAP_FAILED) {
		sem = NULL;
		if (errno == ENOMEM)
			errno = ENOSPC;
		goto error;
	}
	if (sem->_magic != SEM_MAGIC) {
		errno = EINVAL;
		goto error;
	}
	ni->open_count = 1;
	ni->sem = sem;
	LIST_INSERT_HEAD(&sem_list, ni, next);
	_pthread_mutex_unlock(&sem_llock);
	_close(fd);
	return (sem);

error:
	_pthread_mutex_unlock(&sem_llock);
	if (fd != -1)
		_close(fd);
	if (sem != NULL)
		munmap(sem, sizeof(sem_t));
	free(ni);
	return (SEM_FAILED);
}

int
_sem_close(sem_t *sem)
{
	struct sem_nameinfo *ni;

	if (sem_check_validity(sem) != 0)
		return (-1);

	if (!(sem->_kern._flags & SEM_NAMED)) {
		errno = EINVAL;
		return (-1);
	}

	_pthread_once(&once, sem_module_init);

	_pthread_mutex_lock(&sem_llock);
	LIST_FOREACH(ni, &sem_list, next) {
		if (sem == ni->sem) {
			if (--ni->open_count > 0) {
				_pthread_mutex_unlock(&sem_llock);
				return (0);
			} else
				break;
		}
	}

	if (ni != NULL) {
		LIST_REMOVE(ni, next);
		_pthread_mutex_unlock(&sem_llock);
		munmap(sem, sizeof(*sem));
		free(ni);
		return (0);
	}
	_pthread_mutex_unlock(&sem_llock);
	errno = EINVAL;
	return (-1);
}
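
/*
 * Remove the file that backs a named semaphore.  Mappings held by
 * processes that already opened the semaphore remain valid; only
 * future sem_open() calls are affected.
 */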
int
_sem_unlink(const char *name)
{
	char path[PATH_MAX];

	if (name[0] != '/') {
		errno = ENOENT;
		return (-1);
	}
	name++;

	strcpy(path, SEM_PREFIX);
	if (strlcat(path, name, sizeof(path)) >= sizeof(path)) {
		errno = ENAMETOOLONG;
		return (-1);
	}
	return (unlink(path));
}

int
_sem_destroy(sem_t *sem)
{

	if (sem_check_validity(sem) != 0)
		return (-1);

	if (sem->_kern._flags & SEM_NAMED) {
		errno = EINVAL;
		return (-1);
	}
	sem->_magic = 0;
	return (0);
}

int
_sem_getvalue(sem_t * __restrict sem, int * __restrict sval)
{

	if (sem_check_validity(sem) != 0)
		return (-1);

	*sval = (int)sem->_kern._count;
	return (0);
}

static __inline int
usem_wake(struct _usem *sem)
{

	if (!sem->_has_waiters)
		return (0);
	return (_umtx_op(sem, UMTX_OP_SEM_WAKE, 0, NULL, NULL));
}

static __inline int
usem_wait(struct _usem *sem, const struct timespec *timeout)
{

	if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
	    timeout->tv_nsec <= 0))) {
		errno = ETIMEDOUT;
		return (-1);
	}
	return (_umtx_op(sem, UMTX_OP_SEM_WAIT, 0, NULL,
	    __DECONST(void *, timeout)));
}

int
_sem_trywait(sem_t *sem)
{
	int val;

	if (sem_check_validity(sem) != 0)
		return (-1);

	while ((val = sem->_kern._count) > 0) {
		if (atomic_cmpset_acq_int(&sem->_kern._count, val, val - 1))
			return (0);
	}
	errno = EAGAIN;
	return (-1);
}

static void
sem_cancel_handler(void *arg)
{
	sem_t *sem = arg;

	if (sem->_kern._has_waiters && sem->_kern._count)
		usem_wake(&sem->_kern);
}

#define	TIMESPEC_SUB(dst, src, val)					\
	do {								\
		(dst)->tv_sec = (src)->tv_sec - (val)->tv_sec;		\
		(dst)->tv_nsec = (src)->tv_nsec - (val)->tv_nsec;	\
		if ((dst)->tv_nsec < 0) {				\
			(dst)->tv_sec--;				\
			(dst)->tv_nsec += 1000000000;			\
		}							\
	} while (0)

static __inline int
enable_async_cancel(void)
{
	int old;

	_pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
	return (old);
}

static __inline void
restore_async_cancel(int val)
{

	_pthread_setcanceltype(val, NULL);
}
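
/*
 * Wait for the semaphore.  The fast path decrements a non-zero count with
 * an atomic compare-and-swap; if the count is zero the thread sleeps in
 * the kernel via UMTX_OP_SEM_WAIT.  An absolute timeout is converted to a
 * relative interval on every pass, since that is what usem_wait() hands to
 * the kernel.  The cleanup handler passes a pending wakeup on to another
 * waiter if this thread is cancelled after sem_post() has already
 * incremented the count.
 */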
int
_sem_timedwait(sem_t * __restrict sem,
    const struct timespec * __restrict abstime)
{
	struct timespec ts, ts2;
	int val, retval, saved_cancel;

	if (sem_check_validity(sem) != 0)
		return (-1);

	retval = 0;
	for (;;) {
		while ((val = sem->_kern._count) > 0) {
			if (atomic_cmpset_acq_int(&sem->_kern._count, val,
			    val - 1))
				return (0);
		}

		if (retval)
			break;

		/*
		 * The timeout argument is only supposed to
		 * be checked if the thread would have blocked.
		 */
		if (abstime != NULL) {
			if (abstime->tv_nsec >= 1000000000 ||
			    abstime->tv_nsec < 0) {
				errno = EINVAL;
				return (-1);
			}
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
		}
		pthread_cleanup_push(sem_cancel_handler, sem);
		saved_cancel = enable_async_cancel();
		retval = usem_wait(&sem->_kern, abstime ? &ts2 : NULL);
		restore_async_cancel(saved_cancel);
		pthread_cleanup_pop(0);
	}
	return (retval);
}

int
_sem_wait(sem_t *sem)
{

	return (_sem_timedwait(sem, NULL));
}

/*
 * POSIX:
 * The sem_post() interface is reentrant with respect to signals and may be
 * invoked from a signal-catching function.
 * This implementation takes no locks, only an atomic increment followed by
 * a kernel wakeup, so it should be safe.
 */
int
_sem_post(sem_t *sem)
{

	if (sem_check_validity(sem) != 0)
		return (-1);

	atomic_add_rel_int(&sem->_kern._count, 1);
	return (usem_wake(&sem->_kern));
}
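
/*
 * Illustrative sketch (not part of the library build): because _sem_post()
 * performs only an atomic increment and a kernel wakeup, sem_post() may be
 * called from a signal handler to wake a thread blocked in sem_wait().
 * The handler and semaphore names below are hypothetical.
 *
 *	static sem_t event_sem;
 *
 *	static void
 *	on_sigusr1(int sig)
 *	{
 *		sem_post(&event_sem);		-- async-signal-safe
 *	}
 *
 *	int
 *	main(void)
 *	{
 *		sem_init(&event_sem, 0, 0);
 *		signal(SIGUSR1, on_sigusr1);
 *		sem_wait(&event_sem);		-- woken by the handler
 *		sem_destroy(&event_sem);
 *		return (0);
 *	}
 */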