Lines matching refs:self (each entry gives the source line number, the matching source line, and the enclosing function; "local" marks a local-variable definition)
154 ulwp_t *self = curthread; in forkx() local
155 uberdata_t *udp = self->ul_uberdata; in forkx()
158 if (self->ul_vfork) { in forkx()
172 self->ul_vfork = 0; in forkx()
177 sigoff(self); in forkx()
178 if (self->ul_fork) { in forkx()
182 sigon(self); in forkx()
186 self->ul_fork = 1; in forkx()
211 if (self->ul_siglink == NULL) in forkx()
225 block_all_signals(self); in forkx()
244 self->ul_schedctl_called = NULL; in forkx()
245 self->ul_schedctl = NULL; in forkx()
246 self->ul_cursig = 0; in forkx()
247 self->ul_siginfo.si_signo = 0; in forkx()
252 restore_signals(self); in forkx()
254 if (self->ul_siglink == NULL) in forkx()
259 restore_signals(self); in forkx()
261 if (self->ul_siglink == NULL) in forkx()
266 self->ul_fork = 0; in forkx()
267 sigon(self); in forkx()
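The forkx() hits above trace libc's fork-safety bracket: sigoff()/sigon() defer signal delivery while the ul_fork flag is tested and set, block_all_signals()/restore_signals() keep every signal blocked across the fork itself, and the forked image clears its cached schedctl pointers and pending-signal state (ul_cursig, ul_siginfo) before signals are re-enabled. Below is a minimal stand-alone sketch of the same block-everything-around-fork idea, using only public POSIX calls rather than the libc internals shown here; it is illustrative, not how libc itself is structured.

#include <signal.h>
#include <stdio.h>
#include <unistd.h>
#include <pthread.h>

int
main(void)
{
        sigset_t all, old;
        pid_t pid;

        (void) sigfillset(&all);
        /* Defer every signal so no handler can run mid-fork. */
        (void) pthread_sigmask(SIG_SETMASK, &all, &old);

        pid = fork();

        /* Parent and child both restore the caller's original mask. */
        (void) pthread_sigmask(SIG_SETMASK, &old, NULL);

        if (pid == -1) {
                perror("fork");
                return (1);
        }
        if (pid == 0) {
                (void) printf("child %ld\n", (long)getpid());
                _exit(0);
        }
        (void) printf("parent of %ld\n", (long)pid);
        return (0);
}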
291 ulwp_t *self = curthread; in forkallx() local
292 uberdata_t *udp = self->ul_uberdata; in forkallx()
295 if (self->ul_vfork) { in forkallx()
303 self->ul_vfork = 0; in forkallx()
308 sigoff(self); in forkallx()
309 if (self->ul_fork) { in forkallx()
310 sigon(self); in forkallx()
314 self->ul_fork = 1; in forkallx()
317 block_all_signals(self); in forkallx()
323 self->ul_schedctl_called = NULL; in forkallx()
324 self->ul_schedctl = NULL; in forkallx()
325 self->ul_cursig = 0; in forkallx()
326 self->ul_siginfo.si_signo = 0; in forkallx()
333 restore_signals(self); in forkallx()
336 self->ul_fork = 0; in forkallx()
337 sigon(self); in forkallx()
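forkallx() repeats the same bracket in condensed form, and its opening hits (like lines 177-186 in forkx()) show the reentrancy guard: with signals deferred, ul_fork is tested and, if a fork is already underway on this thread, the call backs out via sigon() instead of proceeding; otherwise ul_fork is set for the duration and cleared at the end. The sketch below models that per-thread in-progress guard; the names (fork_guard_enter/fork_guard_exit) and the errno value are illustrative choices, not libc's.

#include <errno.h>
#include <stdio.h>

/* One flag per thread, in the spirit of the ul_fork bit above. */
static _Thread_local int fork_in_progress;

static int
fork_guard_enter(void)
{
        if (fork_in_progress) {
                errno = EDEADLK;        /* illustrative error choice */
                return (-1);
        }
        fork_in_progress = 1;
        return (0);
}

static void
fork_guard_exit(void)
{
        fork_in_progress = 0;
}

int
main(void)
{
        if (fork_guard_enter() == 0) {
                /* ... the real fork work would happen here ... */
                if (fork_guard_enter() == -1)
                        (void) printf("nested fork refused: %d\n", errno);
                fork_guard_exit();
        }
        return (0);
}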
353 ulwp_t *self = curthread; \
355 (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks | \
356 self->ul_critical | self->ul_sigdefer); \
359 self->ul_save_async = self->ul_cancel_async; \
360 if (!self->ul_cancel_disabled) { \
361 self->ul_cancel_async = 1; \
362 if (self->ul_cancel_pending) \
365 self->ul_sp = stkptr(); \
366 } else if (self->ul_cancel_pending && \
367 !self->ul_cancel_disabled) { \
368 set_cancel_eintr_flag(self); \
374 self->ul_sp = 0; \
375 self->ul_cancel_async = self->ul_save_async; \
389 *self->ul_errnop = EINTR; \
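The PROLOGUE/EPILOGUE macro hits (lines 353-389) are the cancellation bracket libc wraps around blocking system calls: unless the thread is in vfork, holds libc locks, or is in a critical or signal-deferred region, the prologue saves ul_cancel_async, switches to asynchronous cancellation for the duration of the call, and records the stack pointer in ul_sp; the epilogue restores both. When a cancel is pending but the bracket cannot act on it, set_cancel_eintr_flag() arranges for the call to fail with EINTR instead (line 389). The nearest public-API expression of that shape is pthread_setcanceltype(); the sketch below applies it around read() as an analogue and is not the macro itself.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static ssize_t
cancelable_read(int fd, void *buf, size_t n)
{
        int oldtype;
        ssize_t rv;

        /* "PROLOGUE": let cancellation take effect during the call. */
        (void) pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
        rv = read(fd, buf, n);
        /* "EPILOGUE": restore the caller's cancellation type. */
        (void) pthread_setcanceltype(oldtype, NULL);
        return (rv);
}

static void *
reader(void *arg)
{
        int fd = *(int *)arg;
        char c;

        (void) cancelable_read(fd, &c, 1);      /* blocks until canceled */
        return (NULL);
}

int
main(void)
{
        int fds[2];
        pthread_t t;

        (void) pipe(fds);
        (void) pthread_create(&t, NULL, reader, &fds[0]);
        sleep(1);
        (void) pthread_cancel(t);       /* takes effect inside read() */
        (void) pthread_join(t, NULL);
        (void) printf("reader canceled\n");
        return (0);
}

read() is itself a cancellation point, so this demo works with either cancel type; the type switch simply mirrors the shape of the bracket above.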
413 ulwp_t *self = curthread; \
415 (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks | \
416 self->ul_critical | self->ul_sigdefer); \
417 if (!self->ul_vfork) { \
419 block_all_signals(self); \
420 self->ul_tmpmask = *sigmask; \
421 delete_reserved_signals(&self->ul_tmpmask); \
422 self->ul_sigsuspend = 1; \
425 self->ul_save_async = self->ul_cancel_async; \
426 if (!self->ul_cancel_disabled) { \
427 self->ul_cancel_async = 1; \
428 if (self->ul_cancel_pending) { \
429 if (self->ul_sigsuspend) { \
430 self->ul_sigsuspend = 0;\
431 restore_signals(self); \
436 self->ul_sp = stkptr(); \
448 self->ul_sp = 0; \
449 self->ul_cancel_async = self->ul_save_async; \
451 if (self->ul_sigsuspend) { \
452 self->ul_sigsuspend = 0; \
453 restore_signals(self); \
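PROLOGUE_MASK (lines 413-453) is the variant used by mask-installing calls such as sigsuspend(): outside of vfork it blocks all signals, copies the caller's mask into ul_tmpmask with libc's reserved signals deleted, and sets ul_sigsuspend so that both the pending-cancel path and the normal exit path know to clear the flag and call restore_signals() on the way out. At the application level, "wait under a temporary mask that is restored automatically" is what sigsuspend() itself provides; a small self-contained example:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_usr1;

static void
on_usr1(int sig)
{
        (void) sig;
        got_usr1 = 1;
}

int
main(void)
{
        struct sigaction sa;
        sigset_t block, waitmask;

        sa.sa_handler = on_usr1;
        sa.sa_flags = 0;
        (void) sigemptyset(&sa.sa_mask);
        (void) sigaction(SIGUSR1, &sa, NULL);

        /* Block SIGUSR1 so it can only be delivered inside sigsuspend(). */
        (void) sigemptyset(&block);
        (void) sigaddset(&block, SIGUSR1);
        (void) sigprocmask(SIG_BLOCK, &block, &waitmask);
        (void) sigdelset(&waitmask, SIGUSR1);

        (void) kill(getpid(), SIGUSR1);         /* stays pending for now */
        (void) sigsuspend(&waitmask);           /* temporary mask, auto-restored */
        (void) printf("handler ran: %d\n", (int)got_usr1);
        return (0);
}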
464 ulwp_t *self = curthread; in _cancel_prologue() local
466 self->ul_cancel_prologue = in _cancel_prologue()
467 (self->ul_vfork | self->ul_nocancel | self->ul_libc_locks | in _cancel_prologue()
468 self->ul_critical | self->ul_sigdefer) != 0; in _cancel_prologue()
469 if (self->ul_cancel_prologue == 0) { in _cancel_prologue()
470 self->ul_save_async = self->ul_cancel_async; in _cancel_prologue()
471 if (!self->ul_cancel_disabled) { in _cancel_prologue()
472 self->ul_cancel_async = 1; in _cancel_prologue()
473 if (self->ul_cancel_pending) in _cancel_prologue()
476 self->ul_sp = stkptr(); in _cancel_prologue()
477 } else if (self->ul_cancel_pending && in _cancel_prologue()
478 !self->ul_cancel_disabled) { in _cancel_prologue()
479 set_cancel_eintr_flag(self); in _cancel_prologue()
486 ulwp_t *self = curthread; in _cancel_epilogue() local
488 if (self->ul_cancel_prologue == 0) { in _cancel_epilogue()
489 self->ul_sp = 0; in _cancel_epilogue()
490 self->ul_cancel_async = self->ul_save_async; in _cancel_epilogue()
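_cancel_prologue() and _cancel_epilogue() (lines 464-490) are out-of-line function forms of the same bracket, presumably for callers that cannot expand the macros in place; ul_cancel_prologue remembers whether the prologue decided to do nothing, so the epilogue knows whether ul_sp and ul_cancel_async need restoring. The sketch below mirrors that did-the-prologue-act flag with public calls; the names and the thread-local slots are invented for illustration and are not libc's.

#include <pthread.h>
#include <stdio.h>

/* Per-thread save slots, standing in for ul_save_async/ul_cancel_prologue. */
static _Thread_local int saved_type;
static _Thread_local int did_prologue;

static void
cancel_bracket_enter(void)
{
        did_prologue = 1;
        (void) pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &saved_type);
}

static void
cancel_bracket_exit(void)
{
        if (did_prologue) {
                (void) pthread_setcanceltype(saved_type, NULL);
                did_prologue = 0;
        }
}

int
main(void)
{
        cancel_bracket_enter();
        /* ... a blocking, cancelable operation would sit here ... */
        cancel_bracket_exit();
        (void) printf("cancellation state restored\n");
        return (0);
}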
1047 *self->ul_errnop = EINTR; in sigtimedwait()
1054 *self->ul_errnop = EINTR; in sigtimedwait()
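The final two hits (lines 1047 and 1054) are sigtimedwait() storing EINTR through self->ul_errnop, which appears to be how the wait is failed when it is interrupted for libc-internal reasons (for example a deferred cancellation) rather than by a signal the caller asked for. From the caller's point of view that is an ordinary interrupted call, so the usual retry-on-EINTR loop applies; a small stand-alone example:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>

int
main(void)
{
        sigset_t set;
        siginfo_t info;
        struct timespec to = { .tv_sec = 1, .tv_nsec = 0 };
        int sig;

        (void) sigemptyset(&set);
        (void) sigaddset(&set, SIGUSR1);
        (void) sigprocmask(SIG_BLOCK, &set, NULL);

        do {
                sig = sigtimedwait(&set, &info, &to);
        } while (sig == -1 && errno == EINTR);  /* retry interrupted waits */

        if (sig == -1)
                perror("sigtimedwait");         /* EAGAIN on timeout */
        else
                (void) printf("got signal %d\n", sig);
        return (0);
}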