/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * getnetgrent.c
 *
 *	- name-service switch frontend routines for the netgroup API.
 *
 * Policy decision:
 *	If netgroup A refers to netgroup B, both must occur in the same
 *	source (any other choice gives very confusing semantics).  This
 *	assumption is deeply embedded in the code below and in the backends.
 *
 * innetgr() is implemented on top of something called __multi_innetgr(),
 * which replaces each (char *) argument of innetgr() with a counted vector
 * of (char *).  The semantics are the same as an OR of the results of
 * innetgr() operations on each possible 4-tuple picked from the arguments,
 * but it's possible to implement some cases more efficiently.  This is
 * important for mountd, which used to read YP netgroup.byhost directly in
 * order to determine efficiently whether a given host belonged to any one
 * of a long list of netgroups.  Wildcarded arguments are indicated by a
 * count of zero.
 */
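
/*
 * Illustrative sketch (an example, not part of this file's interface):
 * a mountd-style caller can ask in one call whether a single host is a
 * member of any of several netgroups, wildcarding the user and domain
 * vectors with a count of zero.  The netgroup and host names below are
 * made up; the calling convention mirrors the IA() macro that innetgr()
 * itself uses further down.
 *
 *	char *groups[] = { "eng-hosts", "ops-hosts", "lab-hosts" };
 *	char *host = "examplehost";
 *	char *user = 0, *domain = 0;
 *
 *	int member = __multi_innetgr(3, groups, 1, &host,
 *	    0, &user, 0, &domain);
 */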

#include "lint.h"
#include <string.h>
#include <synch.h>
#include <nss_dbdefs.h>
#include <mtlib.h>
#include <libc.h>

static DEFINE_NSS_DB_ROOT(db_root);

void
_nss_initf_netgroup(p)
	nss_db_params_t	*p;
{
	p->name	= NSS_DBNAM_NETGROUP;
	p->default_config = NSS_DEFCONF_NETGROUP;
}

/*
 * The netgroup routines aren't quite like the majority of the switch clients.
 *   innetgr() more-or-less fits the getXXXbyYYY mould, but for the others:
 *	- setnetgrent("netgroup") is really a getXXXbyYYY routine, i.e. it
 *	  searches the sources until it finds an entry with the given name.
 *	  Rather than returning the (potentially large) entry, it simply
 *	  initializes a cursor, and then...
 *	- getnetgrent(...) is repeatedly invoked by the user to extract the
 *	  contents of the entry found by setnetgrent().
 *	- endnetgrent() is almost like a real endXXXent routine.
 * The behaviour in NSS was:
 *  If we were certain that all the backends could provide netgroup information
 *  in a common form, we could make the setnetgrent() backend return the entire
 *  entry to the frontend, then implement getnetgrent() and endnetgrent()
 *  strictly in the frontend (aka here).  But we're not certain, so we won't.
 * In NSS2:
 *  Since it is nscd that accumulates and returns the results, we can return
 *  the entire result on setnetgrent().
 *
 * NOTE:
 *	In the SunOS 4.x (YP) version of this code, innetgr() did not
 *	affect the state of {set,get,end}netgrent().  Somewhere out
 *	there probably lurks a program that depends on this behaviour,
 *	so this version (both frontend and backends) had better
 *	behave the same way.
 */
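
/*
 * Illustrative usage sketch (an example, not part of this file): walking
 * the members of a netgroup with the reentrant iterator.  The netgroup
 * name is made up; getnetgrent_r() returns 1 while triples remain and 0
 * once the entry is exhausted or no entry was found.
 *
 *	char *machine, *user, *domain;
 *	char buf[NSS_BUFLEN_NETGROUP];
 *
 *	(void) setnetgrent("example-netgroup");
 *	while (getnetgrent_r(&machine, &user, &domain, buf, sizeof (buf))) {
 *		... use the (machine, user, domain) triple ...
 *	}
 *	(void) endnetgrent();
 */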

/* ===> ?? fix "__" name */
int
__multi_innetgr(ngroup,	pgroup,
		nhost,	phost,
		nuser,	puser,
		ndomain, pdomain)
	nss_innetgr_argc	ngroup, nhost, nuser, ndomain;
	nss_innetgr_argv	pgroup, phost, puser, pdomain;
{
	struct nss_innetgr_args	ia;

	if (ngroup == 0) {
		return (0);	/* One thing fewer for backends to worry about */
	}

	ia.groups.argc			= ngroup;
	ia.groups.argv			= pgroup;
	ia.arg[NSS_NETGR_MACHINE].argc	= nhost;
	ia.arg[NSS_NETGR_MACHINE].argv	= phost;
	ia.arg[NSS_NETGR_USER].argc	= nuser;
	ia.arg[NSS_NETGR_USER].argv	= puser;
	ia.arg[NSS_NETGR_DOMAIN].argc	= ndomain;
	ia.arg[NSS_NETGR_DOMAIN].argv	= pdomain;
	ia.status			= NSS_NETGR_NO;

	(void) nss_search(&db_root, _nss_initf_netgroup,
	    NSS_DBOP_NETGROUP_IN, &ia);
	return (ia.status == NSS_NETGR_FOUND);
}

int
innetgr(group, host, user, domain)
	const char *group, *host, *user, *domain;
{
#define	IA(charp)	\
	(nss_innetgr_argc)((charp) != 0), (nss_innetgr_argv)(&(charp))

	return (__multi_innetgr(IA(group), IA(host), IA(user), IA(domain)));
}
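
/*
 * For reference (a worked expansion, not generated code): a call such as
 * innetgr("example-netgroup", "examplehost", NULL, NULL) expands through
 * IA() into roughly
 *
 *	__multi_innetgr(1, &group, 1, &host, 0, &user, 0, &domain);
 *
 * so a NULL innetgr() argument becomes a count of zero, i.e. a wildcard,
 * as described in the block comment above __multi_innetgr().
 */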

/*
 * Context for setnetgrent()/getnetgrent().  If the user is being sensible
 * the requests will be serialized anyway, but let's play safe and
 * serialize them ourselves (anything to prevent a coredump)...
 * We can't use lmutex_lock() here because we don't know what the backends
 * that we call may call in turn.  They might call malloc()/free().
 * So we use the brute-force callout_lock_enter() instead.
 */
static nss_backend_t	*getnetgrent_backend;

int
setnetgrent(const char *netgroup)
{
	nss_backend_t	*be;

	if (netgroup == NULL) {
		/* Prevent coredump, otherwise don't do anything profound */
		netgroup = "";
	}

	callout_lock_enter();
	be = getnetgrent_backend;
	if (be != NULL && NSS_INVOKE_DBOP(be, NSS_DBOP_SETENT,
	    (void *)netgroup) != NSS_SUCCESS) {
		(void) NSS_INVOKE_DBOP(be, NSS_DBOP_DESTRUCTOR, 0);
		be = NULL;
	}
	if (be == NULL) {
		struct nss_setnetgrent_args	args;

		args.netgroup	= netgroup;
		args.iterator	= 0;
		(void) nss_search(&db_root, _nss_initf_netgroup,
		    NSS_DBOP_NETGROUP_SET, &args);
		be = args.iterator;
	}
	getnetgrent_backend = be;
	callout_lock_exit();
	return (0);
}

int
getnetgrent_r(machinep, namep, domainp, buffer, buflen)
	char		**machinep;
	char		**namep;
	char		**domainp;
	char		*buffer;
	int		buflen;
{
	struct nss_getnetgrent_args	args;

	args.buffer	= buffer;
	args.buflen	= buflen;
	args.status	= NSS_NETGR_NO;

	callout_lock_enter();
	if (getnetgrent_backend != 0) {
		(void) NSS_INVOKE_DBOP(getnetgrent_backend,
		    NSS_DBOP_GETENT, &args);
	}
	callout_lock_exit();

	if (args.status == NSS_NETGR_FOUND) {
		*machinep = args.retp[NSS_NETGR_MACHINE];
		*namep	  = args.retp[NSS_NETGR_USER];
		*domainp  = args.retp[NSS_NETGR_DOMAIN];
		return (1);
	} else {
		return (0);
	}
}
203 
204 static nss_XbyY_buf_t *buf;
205 
206 int
getnetgrent(machinep,namep,domainp)207 getnetgrent(machinep, namep, domainp)
208 	char		**machinep;
209 	char		**namep;
210 	char		**domainp;
211 {
212 	(void) NSS_XbyY_ALLOC(&buf, 0, NSS_BUFLEN_NETGROUP);
213 	return (getnetgrent_r(machinep, namep, domainp,
214 	    buf->buffer, buf->buflen));
215 }

int
endnetgrent()
{
	callout_lock_enter();
	if (getnetgrent_backend != 0) {
		(void) NSS_INVOKE_DBOP(getnetgrent_backend,
		    NSS_DBOP_DESTRUCTOR, 0);
		getnetgrent_backend = 0;
	}
	callout_lock_exit();
	nss_delete(&db_root);	/* === ? */
	NSS_XbyY_FREE(&buf);
	return (0);
}
231