/*
 * Copyright (C) 2010 David Xu <davidxu@freebsd.org>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice(s), this list of conditions and the following disclaimer as
 *    the first lines of this file unmodified other than the possible
 *    addition of one or more copyright notices.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice(s), this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "namespace.h"
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <errno.h>
#include <machine/atomic.h>
#include <sys/umtx.h>
#include <limits.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <semaphore.h>
#include <unistd.h>
#include "un-namespace.h"
#include "libc_private.h"

__weak_reference(_sem_close, sem_close);
__weak_reference(_sem_destroy, sem_destroy);
__weak_reference(_sem_getvalue, sem_getvalue);
__weak_reference(_sem_init, sem_init);
__weak_reference(_sem_open, sem_open);
__weak_reference(_sem_post, sem_post);
__weak_reference(_sem_timedwait, sem_timedwait);
__weak_reference(_sem_clockwait_np, sem_clockwait_np);
__weak_reference(_sem_trywait, sem_trywait);
__weak_reference(_sem_unlink, sem_unlink);
__weak_reference(_sem_wait, sem_wait);

#define SEM_PREFIX      "/tmp/SEMD"
#define SEM_MAGIC       ((u_int32_t)0x73656d32)

_Static_assert(SEM_VALUE_MAX <= USEM_MAX_COUNT, "SEM_VALUE_MAX too large");

struct sem_nameinfo {
        int open_count;
        char *name;
        dev_t dev;
        ino_t ino;
        sem_t *sem;
        LIST_ENTRY(sem_nameinfo) next;
};

static pthread_once_t once = PTHREAD_ONCE_INIT;
static pthread_mutex_t sem_llock;
static LIST_HEAD(,sem_nameinfo) sem_list = LIST_HEAD_INITIALIZER(sem_list);

static void
sem_prefork()
{

        _pthread_mutex_lock(&sem_llock);
}

static void
sem_postfork()
{
        _pthread_mutex_unlock(&sem_llock);
}

static void
sem_child_postfork()
{
        _pthread_mutex_unlock(&sem_llock);
}

static void
sem_module_init(void)
{
        pthread_mutexattr_t ma;

        _pthread_mutexattr_init(&ma);
        _pthread_mutexattr_settype(&ma, PTHREAD_MUTEX_RECURSIVE);
        _pthread_mutex_init(&sem_llock, &ma);
        _pthread_mutexattr_destroy(&ma);
        _pthread_atfork(sem_prefork, sem_postfork, sem_child_postfork);
}

static inline int
sem_check_validity(sem_t *sem)
{

        if (sem->_magic == SEM_MAGIC)
                return (0);
        else {
                errno = EINVAL;
                return (-1);
        }
}

int
_sem_init(sem_t *sem, int pshared, unsigned int value)
{

        if (value > SEM_VALUE_MAX) {
                errno = EINVAL;
                return (-1);
        }

        bzero(sem, sizeof(sem_t));
        sem->_magic = SEM_MAGIC;
        sem->_kern._count = (u_int32_t)value;
        sem->_kern._flags = pshared ? USYNC_PROCESS_SHARED : 0;
        return (0);
}

sem_t *
_sem_open(const char *name, int flags, ...)
{
        char path[PATH_MAX];
        struct stat sb;
        va_list ap;
        struct sem_nameinfo *ni = NULL;
        sem_t *sem = NULL;
        int fd = -1, mode, len, errsave;
        int value = 0;

        if (name[0] != '/') {
                errno = EINVAL;
                return (SEM_FAILED);
        }
        name++;
        strcpy(path, SEM_PREFIX);
        if (strlcat(path, name, sizeof(path)) >= sizeof(path)) {
                errno = ENAMETOOLONG;
                return (SEM_FAILED);
        }
        if (flags & ~(O_CREAT|O_EXCL)) {
                errno = EINVAL;
                return (SEM_FAILED);
        }
        if ((flags & O_CREAT) != 0) {
                va_start(ap, flags);
                mode = va_arg(ap, int);
                value = va_arg(ap, int);
                va_end(ap);
        }
        fd = -1;
        _pthread_once(&once, sem_module_init);

        _pthread_mutex_lock(&sem_llock);
        /* Reuse the mapping if this process already opened the semaphore. */
        LIST_FOREACH(ni, &sem_list, next) {
                if (ni->name != NULL && strcmp(name, ni->name) == 0) {
                        fd = _open(path, flags | O_RDWR | O_CLOEXEC |
                            O_EXLOCK, mode);
                        if (fd == -1 || _fstat(fd, &sb) == -1) {
                                ni = NULL;
                                goto error;
                        }
                        if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT |
                            O_EXCL) || ni->dev != sb.st_dev ||
                            ni->ino != sb.st_ino) {
                                ni->name = NULL;
                                ni = NULL;
                                break;
                        }
                        ni->open_count++;
                        sem = ni->sem;
                        _pthread_mutex_unlock(&sem_llock);
                        _close(fd);
                        return (sem);
                }
        }

        len = sizeof(*ni) + strlen(name) + 1;
        ni = (struct sem_nameinfo *)malloc(len);
        if (ni == NULL) {
                errno = ENOSPC;
                goto error;
        }

        ni->name = (char *)(ni+1);
        strcpy(ni->name, name);

        if (fd == -1) {
                fd = _open(path, flags | O_RDWR | O_CLOEXEC | O_EXLOCK, mode);
                if (fd == -1 || _fstat(fd, &sb) == -1)
                        goto error;
        }
        /* A freshly created file is empty; write out the initial state. */
        if (sb.st_size < sizeof(sem_t)) {
                sem_t tmp;

                tmp._magic = SEM_MAGIC;
                tmp._kern._count = value;
                tmp._kern._flags = USYNC_PROCESS_SHARED | SEM_NAMED;
                if (_write(fd, &tmp, sizeof(tmp)) != sizeof(tmp))
                        goto error;
        }
        flock(fd, LOCK_UN);
        sem = (sem_t *)mmap(NULL, sizeof(sem_t), PROT_READ|PROT_WRITE,
            MAP_SHARED|MAP_NOSYNC, fd, 0);
        if (sem == MAP_FAILED) {
                sem = NULL;
                if (errno == ENOMEM)
                        errno = ENOSPC;
                goto error;
        }
        if (sem->_magic != SEM_MAGIC) {
                errno = EINVAL;
                goto error;
        }
        ni->open_count = 1;
        ni->sem = sem;
        ni->dev = sb.st_dev;
        ni->ino = sb.st_ino;
        LIST_INSERT_HEAD(&sem_list, ni, next);
        _close(fd);
        _pthread_mutex_unlock(&sem_llock);
        return (sem);

error:
        errsave = errno;
        if (fd != -1)
                _close(fd);
        if (sem != NULL)
                munmap(sem, sizeof(sem_t));
        free(ni);
        _pthread_mutex_unlock(&sem_llock);
        errno = errsave;
        return (SEM_FAILED);
}

int
_sem_close(sem_t *sem)
{
        struct sem_nameinfo *ni;

        if (sem_check_validity(sem) != 0)
                return (-1);

        if (!(sem->_kern._flags & SEM_NAMED)) {
                errno = EINVAL;
                return (-1);
        }

        _pthread_once(&once, sem_module_init);

        _pthread_mutex_lock(&sem_llock);
        LIST_FOREACH(ni, &sem_list, next) {
                if (sem == ni->sem) {
                        if (--ni->open_count > 0) {
                                _pthread_mutex_unlock(&sem_llock);
                                return (0);
                        } else
                                break;
                }
        }

        if (ni) {
                LIST_REMOVE(ni, next);
                _pthread_mutex_unlock(&sem_llock);
                munmap(sem, sizeof(*sem));
                free(ni);
                return (0);
        }
        _pthread_mutex_unlock(&sem_llock);
        errno = EINVAL;
        return (-1);
}

int
_sem_unlink(const char *name)
{
        char path[PATH_MAX];

        if (name[0] != '/') {
                errno = ENOENT;
                return (-1);
        }
        name++;
        strcpy(path, SEM_PREFIX);
        if (strlcat(path, name, sizeof(path)) >= sizeof(path)) {
                errno = ENAMETOOLONG;
                return (-1);
        }

        return (unlink(path));
}

int
_sem_destroy(sem_t *sem)
{

        if (sem_check_validity(sem) != 0)
                return (-1);

        if (sem->_kern._flags & SEM_NAMED) {
                errno = EINVAL;
                return (-1);
        }
        sem->_magic = 0;
        return (0);
}

int
_sem_getvalue(sem_t * __restrict sem, int * __restrict sval)
{

        if (sem_check_validity(sem) != 0)
                return (-1);

        *sval = (int)USEM_COUNT(sem->_kern._count);
        return (0);
}

static __inline int
usem_wake(struct _usem2 *sem)
{
        return _umtx_op(sem, UMTX_OP_SEM2_WAKE, 0, NULL, NULL);
}

/*
 * Sleep on the kernel semaphore.  Any timeout is marshalled into a
 * struct _umtx_time; on EINTR with a relative timeout the remaining
 * time reported by the kernel is copied back to the caller.
 */
static __inline int
usem_wait(struct _usem2 *sem, clockid_t clock_id, int flags,
    const struct timespec *rqtp, struct timespec *rmtp)
{
        struct {
                struct _umtx_time timeout;
                struct timespec remain;
        } tms;
        void *tm_p;
        size_t tm_size;
        int retval;

        if (rqtp == NULL) {
                tm_p = NULL;
                tm_size = 0;
        } else {
                tms.timeout._clockid = clock_id;
                tms.timeout._flags = (flags & TIMER_ABSTIME) ? UMTX_ABSTIME : 0;
                tms.timeout._timeout = *rqtp;
                tm_p = &tms;
                tm_size = sizeof(tms);
        }
        retval = _umtx_op(sem, UMTX_OP_SEM2_WAIT, 0, (void *)tm_size, tm_p);
        if (retval == -1 && errno == EINTR && (flags & TIMER_ABSTIME) == 0 &&
            rqtp != NULL && rmtp != NULL) {
                *rmtp = tms.remain;
        }

        return (retval);
}

int
_sem_trywait(sem_t *sem)
{
        int val;

        if (sem_check_validity(sem) != 0)
                return (-1);

        while (USEM_COUNT(val = sem->_kern._count) > 0) {
                if (atomic_cmpset_acq_int(&sem->_kern._count, val, val - 1))
                        return (0);
        }
        errno = EAGAIN;
        return (-1);
}

int
_sem_clockwait_np(sem_t * __restrict sem, clockid_t clock_id, int flags,
    const struct timespec *rqtp, struct timespec *rmtp)
{
        int val, retval;

        if (sem_check_validity(sem) != 0)
                return (-1);

        retval = 0;
        _pthread_testcancel();
        for (;;) {
                /* Fast path: try to take a token without entering the kernel. */
                while (USEM_COUNT(val = sem->_kern._count) > 0) {
                        if (atomic_cmpset_acq_int(&sem->_kern._count, val,
                            val - 1))
                                return (0);
                }

                if (retval) {
                        _pthread_testcancel();
                        break;
                }

                /*
                 * The timeout argument is only supposed to
                 * be checked if the thread would have blocked.
                 */
                if (rqtp != NULL) {
                        if (rqtp->tv_nsec >= 1000000000 || rqtp->tv_nsec < 0) {
                                errno = EINVAL;
                                return (-1);
                        }
                }
                _pthread_cancel_enter(1);
                retval = usem_wait(&sem->_kern, clock_id, flags, rqtp, rmtp);
                _pthread_cancel_leave(0);
        }
        return (retval);
}

int
_sem_timedwait(sem_t * __restrict sem,
    const struct timespec * __restrict abstime)
{
        return (_sem_clockwait_np(sem, CLOCK_REALTIME, TIMER_ABSTIME, abstime,
            NULL));
}

int
_sem_wait(sem_t *sem)
{
        return _sem_timedwait(sem, NULL);
}

/*
 * POSIX:
 * The sem_post() interface is reentrant with respect to signals and may be
 * invoked from a signal-catching function.
 * The implementation does not use a lock, so it should be safe.
 */
int
_sem_post(sem_t *sem)
{
        unsigned int count;

        if (sem_check_validity(sem) != 0)
                return (-1);

        do {
                count = sem->_kern._count;
                if (USEM_COUNT(count) + 1 > SEM_VALUE_MAX) {
                        errno = EOVERFLOW;
                        return (-1);
                }
        } while (!atomic_cmpset_rel_int(&sem->_kern._count, count, count + 1));
        if (count & USEM_HAS_WAITERS)
                usem_wake(&sem->_kern);
        return (0);
}
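
/*
 * Illustrative usage sketch (not part of the implementation above): because
 * _sem_post() is lock-free, POSIX permits an application to post a semaphore
 * from a signal handler.  The handler and variable names below are
 * hypothetical:
 *
 *      static sem_t ev;
 *
 *      static void
 *      on_sigusr1(int sig)
 *      {
 *              sem_post(&ev);
 *      }
 *
 * After sem_init(&ev, 0, 0) and installing on_sigusr1 with sigaction(),
 * a thread blocked in sem_wait(&ev) is released once the handler posts.
 */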