/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * User-level counting semaphores (sema_*() interfaces).
 *
 * Three execution strategies are visible below:
 *   - USYNC_PROCESS semaphores are handed to the kernel
 *     (___lwp_sema_timedwait() / _lwp_sema_trywait() / _lwp_sema_post());
 *   - single-threaded USYNC_THREAD operations just adjust the count
 *     with signals blocked (sigoff()/sigon());
 *   - multithreaded USYNC_THREAD operations use the library's sleep
 *     queues (queue_lock()/enqueue()/dequeue()) with FIFO ordering.
 */

#include "lint.h"
#include "thr_uberdata.h"

/*
 * Cached result of _sysconf(_SC_SEM_VALUE_MAX); filled in lazily by
 * sema_init() and sema_post() on first use.
 */
static uint32_t _semvaluemax;

/*
 * Check to see if anyone is waiting for this semaphore.
 * Returns non-zero when the count is zero, i.e. when a sema_wait()
 * caller would block.
 */
#pragma weak _sema_held = sema_held
int
sema_held(sema_t *sp)
{
	return (sp->count == 0);
}

/*
 * Initialize a semaphore with the given initial count and type.
 * Returns EINVAL for an unknown type, a count above SEM_VALUE_MAX,
 * or (for well-behaved applications) a misaligned semaphore.
 * The 'arg' parameter is accepted for interface compatibility and
 * is unused.
 */
#pragma weak _sema_init = sema_init
int
sema_init(sema_t *sp, unsigned int count, int type, void *arg __unused)
{
	if (_semvaluemax == 0)
		_semvaluemax = (uint32_t)_sysconf(_SC_SEM_VALUE_MAX);
	if ((type != USYNC_THREAD && type != USYNC_PROCESS) ||
	    (count > _semvaluemax))
		return (EINVAL);
	(void) memset(sp, 0, sizeof (*sp));
	sp->count = count;
	sp->type = (uint16_t)type;
	sp->magic = SEMA_MAGIC;

	/*
	 * This should be at the beginning of the function,
	 * but for the sake of old broken applications that
	 * do not have proper alignment for their semaphores
	 * (and don't check the return code from sema_init),
	 * we put it here, after initializing the semaphore regardless.
	 */
	if (((uintptr_t)sp & (_LONG_LONG_ALIGNMENT - 1)) &&
	    curthread->ul_misaligned == 0)
		return (EINVAL);

	return (0);
}

/*
 * Destroy a semaphore: clear the magic number and drop any
 * thread-debug-library registration for it.  Always succeeds.
 */
#pragma weak _sema_destroy = sema_destroy
int
sema_destroy(sema_t *sp)
{
	sp->magic = 0;
	tdb_sync_obj_deregister(sp);
	return (0);
}

/*
 * Common engine for sema_wait(), sema_timedwait() and sema_reltimedwait().
 * A NULL tsp means wait indefinitely; otherwise tsp is a relative timeout.
 * Returns 0 on success or an error number (e.g. EINTR, ETIME from the
 * park/kernel calls).
 */
static int
sema_wait_impl(sema_t *sp, timespec_t *tsp)
{
	lwp_sema_t *lsp = (lwp_sema_t *)sp;
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_sema_stats_t *ssp = SEMA_STATS(sp, udp);
	hrtime_t begin_sleep = 0;
	uint_t count;
	int error = 0;

	/*
	 * All variations of sema_wait() are cancellation points.
	 */
	_cancelon();

	if (ssp)
		tdb_incr(ssp->sema_wait);

	self->ul_sp = stkptr();
	self->ul_wchan = lsp;
	if (__td_event_report(self, TD_SLEEP, udp)) {
		self->ul_td_evbuf.eventnum = TD_SLEEP;
		self->ul_td_evbuf.eventdata = lsp;
		tdb_event(TD_SLEEP, udp);
	}
	/* just a guess, but it looks like we will sleep */
	if (ssp && lsp->count == 0) {
		begin_sleep = gethrtime();
		if (lsp->count == 0)	/* still looks like sleep */
			tdb_incr(ssp->sema_wait_sleep);
		else			/* we changed our mind */
			begin_sleep = 0;
	}

	if (lsp->type == USYNC_PROCESS) {	/* kernel-level */
		set_parking_flag(self, 1);
		if (self->ul_cursig != 0 ||
		    (self->ul_cancelable && self->ul_cancel_pending))
			set_parking_flag(self, 0);
		/* the kernel always does FIFO queueing */
		error = ___lwp_sema_timedwait(lsp, tsp, 1);
		set_parking_flag(self, 0);
	} else if (!udp->uberflags.uf_mt &&	/* single threaded */
	    lsp->count != 0) {			/* and non-blocking */
		/*
		 * Since we are single-threaded, we don't need the
		 * protection of queue_lock().  However, we do need
		 * to block signals while modifying the count.
		 */
		sigoff(self);
		lsp->count--;
		sigon(self);
	} else {		/* multithreaded or blocking */
		queue_head_t *qp;
		ulwp_t *ulwp;
		lwpid_t lwpid = 0;

		qp = queue_lock(lsp, CV);
		while (error == 0 && lsp->count == 0) {
			/*
			 * SUSV3 requires FIFO queueing for semaphores,
			 * at least for SCHED_FIFO and SCHED_RR scheduling.
			 */
			enqueue(qp, self, 1);
			lsp->sema_waiters = 1;
			set_parking_flag(self, 1);
			queue_unlock(qp);
			/*
			 * We may have received SIGCANCEL before we
			 * called queue_lock().  If so and we are
			 * cancelable we should return EINTR.
			 */
			if (self->ul_cursig != 0 ||
			    (self->ul_cancelable && self->ul_cancel_pending))
				set_parking_flag(self, 0);
			error = __lwp_park(tsp, 0);
			set_parking_flag(self, 0);
			qp = queue_lock(lsp, CV);
			if (self->ul_sleepq)	/* timeout or spurious wakeup */
				lsp->sema_waiters = dequeue_self(qp);
		}
		if (error == 0)
			lsp->count--;
		/*
		 * If the count is still non-zero, hand the semaphore on
		 * to the next waiter (wake one queued thread).
		 */
		if (lsp->count != 0 && lsp->sema_waiters) {
			int more;
			if ((ulwp = dequeue(qp, &more)) != NULL) {
				no_preempt(self);
				lwpid = ulwp->ul_lwpid;
			}
			lsp->sema_waiters = more;
		}
		queue_unlock(qp);
		if (lwpid) {
			(void) __lwp_unpark(lwpid);
			preempt(self);
		}
	}

	self->ul_wchan = NULL;
	self->ul_sp = 0;
	if (ssp) {
		if (error == 0) {
			/* we just decremented the count */
			count = lsp->count;
			if (ssp->sema_min_count > count)
				ssp->sema_min_count = count;
		}
		if (begin_sleep)
			ssp->sema_wait_sleep_time += gethrtime() - begin_sleep;
	}

	/*
	 * EINTR may be due to a pending cancellation; _canceloff()
	 * allows the cancellation to take effect, while the _nocancel
	 * variant merely pops the cancellation state.
	 */
	if (error == EINTR)
		_canceloff();
	else
		_canceloff_nocancel();
	return (error);
}

/*
 * Wait (potentially forever) for the semaphore count to become non-zero,
 * then decrement it.  A cancellation point.
 */
#pragma weak _sema_wait = sema_wait
int
sema_wait(sema_t *sp)
{
	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	return (sema_wait_impl(sp, NULL));
}

/*
 * Like sema_wait(), but give up after the specified relative timeout.
 * The timeout is copied to a local so the caller's timespec is never
 * modified by the wait machinery.
 */
int
sema_reltimedwait(sema_t *sp, const timespec_t *reltime)
{
	timespec_t tslocal = *reltime;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	return (sema_wait_impl(sp, &tslocal));
}

/*
 * Like sema_wait(), but give up at the specified CLOCK_REALTIME
 * absolute time (converted here to a relative timeout).
 */
int
sema_timedwait(sema_t *sp, const timespec_t *abstime)
{
	timespec_t tslocal;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);
	abstime_to_reltime(CLOCK_REALTIME, abstime, &tslocal);
	return (sema_wait_impl(sp, &tslocal));
}

/*
 * Non-blocking variant of sema_wait(): decrement the count if it is
 * non-zero, otherwise return EBUSY immediately.  Not a cancellation
 * point.
 */
#pragma weak _sema_trywait = sema_trywait
int
sema_trywait(sema_t *sp)
{
	lwp_sema_t *lsp = (lwp_sema_t *)sp;
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_sema_stats_t *ssp = SEMA_STATS(sp, udp);
	uint_t count;
	int error = 0;

	ASSERT(!curthread->ul_critical || curthread->ul_bindflags);

	if (ssp)
		tdb_incr(ssp->sema_trywait);

	if (lsp->type == USYNC_PROCESS) {	/* kernel-level */
		error = _lwp_sema_trywait(lsp);
	} else if (!udp->uberflags.uf_mt) {	/* single threaded */
		sigoff(self);
		if (lsp->count == 0)
			error = EBUSY;
		else
			lsp->count--;
		sigon(self);
	} else {				/* multithreaded */
		queue_head_t *qp;
		ulwp_t *ulwp;
		lwpid_t lwpid = 0;

		qp = queue_lock(lsp, CV);
		if (lsp->count == 0)
			error = EBUSY;
		else if (--lsp->count != 0 && lsp->sema_waiters) {
			/* count still non-zero: wake one queued waiter */
			int more;
			if ((ulwp = dequeue(qp, &more)) != NULL) {
				no_preempt(self);
				lwpid = ulwp->ul_lwpid;
			}
			lsp->sema_waiters = more;
		}
		queue_unlock(qp);
		if (lwpid) {
			(void) __lwp_unpark(lwpid);
			preempt(self);
		}
	}

	if (error == 0) {
		if (ssp) {
			/* we just decremented the count */
			count = lsp->count;
			if (ssp->sema_min_count > count)
				ssp->sema_min_count = count;
		}
	} else {
		if (ssp)
			tdb_incr(ssp->sema_trywait_fail);
		if (__td_event_report(self, TD_LOCK_TRY, udp)) {
			self->ul_td_evbuf.eventnum = TD_LOCK_TRY;
			tdb_event(TD_LOCK_TRY, udp);
		}
	}

	return (error);
}

/*
 * Increment the semaphore count, waking one waiter if the count was
 * zero and threads are queued.  Returns EOVERFLOW if the count is
 * already at SEM_VALUE_MAX.
 */
#pragma weak _sema_post = sema_post
int
sema_post(sema_t *sp)
{
	lwp_sema_t *lsp = (lwp_sema_t *)sp;
	ulwp_t *self = curthread;
	uberdata_t *udp = self->ul_uberdata;
	tdb_sema_stats_t *ssp = SEMA_STATS(sp, udp);
	uint_t count;
	int error = 0;

	if (ssp)
		tdb_incr(ssp->sema_post);
	if (_semvaluemax == 0)
		_semvaluemax = (uint32_t)_sysconf(_SC_SEM_VALUE_MAX);

	if (lsp->type == USYNC_PROCESS) {	/* kernel-level */
		error = _lwp_sema_post(lsp);
	} else if (!udp->uberflags.uf_mt) {	/* single threaded */
		sigoff(self);
		if (lsp->count >= _semvaluemax)
			error = EOVERFLOW;
		else
			lsp->count++;
		sigon(self);
	} else {				/* multithreaded */
		queue_head_t *qp;
		ulwp_t *ulwp;
		lwpid_t lwpid = 0;

		qp = queue_lock(lsp, CV);
		if (lsp->count >= _semvaluemax)
			error = EOVERFLOW;
		else if (lsp->count++ == 0 && lsp->sema_waiters) {
			/* count went 0 -> 1 with waiters queued: wake one */
			int more;
			if ((ulwp = dequeue(qp, &more)) != NULL) {
				no_preempt(self);
				lwpid = ulwp->ul_lwpid;
			}
			lsp->sema_waiters = more;
		}
		queue_unlock(qp);
		if (lwpid) {
			(void) __lwp_unpark(lwpid);
			preempt(self);
		}
	}

	if (error == 0) {
		if (ssp) {
			/* we just incremented the count */
			count = lsp->count;
			if (ssp->sema_max_count < count)
				ssp->sema_max_count = count;
		}
	}

	return (error);
}