/*
 * Please do not edit this file.
 * It was generated using rpcgen.
 */

#include "rwall.h"
/*
 * NOTE(review): the angle-bracketed header names were stripped from the
 * original #include lines (extraction artifact).  The list below is
 * reconstructed from the symbols this file uses; verify against the
 * original rwall_svc.c.  rwall.h (rpcgen-generated) is expected to pull
 * in <rpc/rpc.h>.
 */
#include <stdio.h>
#include <stdlib.h>		/* getenv, exit */
#include <signal.h>		/* sigset, SIGPIPE */
#include <string.h>		/* memset */
#include <unistd.h>		/* fork, close, dup2, setsid, sleep */
#include <fcntl.h>		/* open */
#include <tiuser.h>		/* t_getstate, t_errno, TBADF, T_DATAXFER */
#include <netconfig.h>		/* getnetconfigent, freenetconfigent */
#include <thread.h>		/* mutex_*, thr_create */
#include <sys/resource.h>	/* rlimit */
#include <syslog.h>

#ifdef DEBUG
#define	RPC_SVC_FG
#endif

/* Idle period (seconds) after which a port-monitor-started server exits */
#define	_RPCSVC_CLOSEDOWN 120

/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 *
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/* from rwall.x */

/*
 * Remote write-all ONC service
 */

/*
 * Server side stub routines for the rpc.rwalld daemon
 */

static int _rpcpmstart;		/* Started by a port monitor ? */

/* States a server can be in wrt request */
#define	_IDLE 0
#define	_SERVED 1

static int _rpcsvcstate = _IDLE;	/* Set when a request is serviced */
static int _rpcsvccount = 0;		/* Number of requests being serviced */
mutex_t _svcstate_lock;			/* lock for _rpcsvcstate, _rpcsvccount */

#if defined(RPC_MSGOUT)
extern void RPC_MSGOUT();
#else /* defined(RPC_MSGOUT) */
/*
 * Emit a diagnostic message.  In foreground (RPC_SVC_FG) builds the
 * message goes to stderr unless we were started by a port monitor, in
 * which case (and in all background builds) it goes to syslog at
 * LOG_ERR.  fmt is a printf format consuming the single string msg.
 */
static void
RPC_MSGOUT(fmt, msg)
	char *fmt;
	char *msg;
{
#ifdef RPC_SVC_FG
	if (_rpcpmstart)
		syslog(LOG_ERR, fmt, msg);
	else {
		(void) fprintf(stderr, fmt, msg);
		(void) putc('\n', stderr);
	}
#else
	syslog(LOG_ERR, fmt, msg);
#endif
}
#endif /* defined(RPC_MSGOUT) */

/*
 * Self-termination thread for port-monitor-started servers: wake every
 * _RPCSVC_CLOSEDOWN/2 seconds and exit(0) once the server has been idle
 * (no requests served, none in flight) for a full period and at most
 * one transport fd remains open.  Otherwise reset the state to _IDLE so
 * the next wakeup can detect continued idleness.
 */
/*ARGSUSED*/
static void *
closedown(arg)
	void *arg;
{
	/*CONSTCOND*/
	while (1) {
		(void) sleep(_RPCSVC_CLOSEDOWN/2);
		/* skip this round rather than block the service path */
		if (mutex_trylock(&_svcstate_lock) != 0)
			continue;
		if (_rpcsvcstate == _IDLE && _rpcsvccount == 0) {
			int size;
			int i, openfd = 0;

			size = svc_max_pollfd;
			for (i = 0; i < size && openfd < 2; i++)
				if (svc_pollfd[i].fd >= 0)
					openfd++;
			if (openfd <= 1)
				exit(0);
		} else
			_rpcsvcstate = _IDLE;
		(void) mutex_unlock(&_svcstate_lock);
	}
}

/*
 * Dispatch routine for WALLPROG, version WALLVERS.  Decodes the
 * request's arguments, invokes the service procedure
 * (wallproc_wall_1_svc for WALLPROC_WALL), sends the reply, and frees
 * argument/result storage.  _rpcsvccount/_rpcsvcstate bracket every
 * exit path so the closedown thread can track activity.
 */
static void
wallprog_1(rqstp, transp)
	struct svc_req *rqstp;
	register SVCXPRT *transp;
{
	union {
		wrapstring wallproc_wall_1_arg;
	} argument;
	union {
		int fill;
	} result;
	bool_t retval;
	bool_t (*_xdr_argument)(), (*_xdr_result)();
	bool_t (*local)();

	(void) mutex_lock(&_svcstate_lock);
	_rpcsvccount++;
	(void) mutex_unlock(&_svcstate_lock);
	switch (rqstp->rq_proc) {
	case NULLPROC:
		(void) svc_sendreply(transp, xdr_void, NULL);
		(void) mutex_lock(&_svcstate_lock);
		_rpcsvccount--;
		_rpcsvcstate = _SERVED;
		(void) mutex_unlock(&_svcstate_lock);
		return; /* CSTYLED */

	case WALLPROC_WALL:
		_xdr_argument = xdr_wrapstring;
		_xdr_result = xdr_void;
		local = (bool_t (*)()) wallproc_wall_1_svc;
		break;

	default:
		svcerr_noproc(transp);
		(void) mutex_lock(&_svcstate_lock);
		_rpcsvccount--;
		_rpcsvcstate = _SERVED;
		(void) mutex_unlock(&_svcstate_lock);
		return; /* CSTYLED */
	}
	(void) memset((char *)&argument, 0, sizeof (argument));
	if (!svc_getargs(transp, _xdr_argument, (caddr_t)&argument)) {
		svcerr_decode(transp);
		(void) mutex_lock(&_svcstate_lock);
		_rpcsvccount--;
		_rpcsvcstate = _SERVED;
		(void) mutex_unlock(&_svcstate_lock);
		return; /* CSTYLED */
	}
	retval = (bool_t)(*local)(&argument, &result, rqstp);
	/* only reply when the procedure has a result and succeeded */
	if (_xdr_result && retval > 0 &&
	    !svc_sendreply(transp, _xdr_result, (char *)&result)) {
		svcerr_systemerr(transp);
	}
	if (!svc_freeargs(transp, _xdr_argument, (caddr_t)&argument)) {
		RPC_MSGOUT("%s", "unable to free arguments");
		exit(1);
	}
	if (_xdr_result != NULL) {
		if (!wallprog_1_freeresult(transp, _xdr_result,
		    (caddr_t)&result))
			RPC_MSGOUT("%s", "unable to free results");
	}
	(void) mutex_lock(&_svcstate_lock);
	_rpcsvccount--;
	_rpcsvcstate = _SERVED;
	(void) mutex_unlock(&_svcstate_lock);
	return; /* CSTYLED */
}

/*
 * Daemon entry point.  Enables automatic MT mode, then either:
 *  - port-monitor/inetd start (stdin is a TLI endpoint): register the
 *    service on fd 0 and, when appropriate, spawn the closedown thread
 *    so an idle server exits; or
 *  - standalone start: daemonize (unless RPC_SVC_FG), create the
 *    datagram_v transports, and run the service loop.
 * svc_run() does not return in normal operation.
 */
int
main()
{
	pid_t pid;
	int i;
	int mode = RPC_SVC_MT_AUTO;

	if (!rpc_control(RPC_SVC_MTMODE_SET, &mode)) {
		RPC_MSGOUT("%s", "unable to set automatic MT mode.");
		exit(1);
	}
	mutex_init(&_svcstate_lock, USYNC_THREAD, NULL);
	(void) sigset(SIGPIPE, SIG_IGN);

	/*
	 * If stdin looks like a TLI endpoint, we assume
	 * that we were started by a port monitor. If
	 * t_getstate fails with TBADF, this is not a
	 * TLI endpoint.
	 */
	if (t_getstate(0) != -1 || t_errno != TBADF) {
		char *netid;
		struct netconfig *nconf = NULL;
		SVCXPRT *transp;
		int pmclose;

		_rpcpmstart = 1;
		openlog("rwall", LOG_PID, LOG_DAEMON);

		if ((netid = getenv("NLSPROVIDER")) == NULL) {
			/* started from inetd */
			pmclose = 1;
		} else {
			if ((nconf = getnetconfigent(netid)) == NULL)
				RPC_MSGOUT("%s", "cannot get transport info");
			/* close down only if no connection is in progress */
			pmclose = (t_getstate(0) != T_DATAXFER);
		}
		if ((transp = svc_tli_create(0, nconf, NULL, 0, 0)) == NULL) {
			RPC_MSGOUT("%s", "cannot create server handle");
			exit(1);
		}
		if (nconf)
			freenetconfigent(nconf);
		if (!svc_reg(transp, WALLPROG, WALLVERS, wallprog_1, 0)) {
			RPC_MSGOUT("%s",
			    "unable to register (WALLPROG, WALLVERS).");
			exit(1);
		}
		if (pmclose) {
			if (thr_create(NULL, 0, closedown, NULL,
			    0, NULL) != 0) {
				RPC_MSGOUT("%s",
				    "cannot create closedown thread");
				exit(1);
			}
		}
		svc_run();
		exit(1);
		/* NOTREACHED */
	} else {
#ifndef RPC_SVC_FG
#pragma weak closefrom
		extern void closefrom();
		int size;
		struct rlimit rl;

		/* standard daemonization: fork and let the parent exit */
		pid = fork();
		if (pid < 0) {
			perror("cannot fork");
			exit(1);
		}
		if (pid)
			exit(0);
		/* close inherited descriptors; prefer closefrom if linked */
		if (closefrom != NULL)
			closefrom(0);
		else {
			rl.rlim_max = 0;
			getrlimit(RLIMIT_NOFILE, &rl);
			if ((size = rl.rlim_max) == 0)
				exit(1);
			for (i = 0; i < size; i++)
				(void) close(i);
		}
		/* redirect stdout/stderr to /dev/null (opened read-write) */
		i = open("/dev/null", 2);
		(void) dup2(i, 1);
		(void) dup2(i, 2);
		setsid();
		openlog("rwall", LOG_PID, LOG_DAEMON);
#endif
	}
	if (!svc_create(wallprog_1, WALLPROG, WALLVERS, "datagram_v")) {
		RPC_MSGOUT("%s",
		    "unable to create (WALLPROG, WALLVERS) for datagram_v.");
		exit(1);
	}

	svc_run();
	RPC_MSGOUT("%s", "svc_run returned");
	exit(1);
	/* NOTREACHED */
}