1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 #pragma ident "%Z%%M% %I% %E% SMI"
28
29 /*
30 * getnetgrent.c
31 *
32 * - name-service switch frontend routines for the netgroup API.
33 *
34 * Policy decision:
35 * If netgroup A refers to netgroup B, both must occur in the same
36 * source (any other choice gives very confusing semantics). This
37 * assumption is deeply embedded in the code below and in the backends.
38 *
39 * innetgr() is implemented on top of something called __multi_innetgr(),
40 * which replaces each (char *) argument of innetgr() with a counted vector
41 * of (char *). The semantics are the same as an OR of the results of
42 * innetgr() operations on each possible 4-tuple picked from the arguments,
43 * but it's possible to implement some cases more efficiently. This is
44 * important for mountd, which used to read YP netgroup.byhost directly in
45 * order to determine efficiently whether a given host belonged to any one
46 * of a long list of netgroups. Wildcarded arguments are indicated by a
47 * count of zero.
48 */
49
50 #include "lint.h"
51 #include <string.h>
52 #include <synch.h>
53 #include <nss_dbdefs.h>
54 #include <mtlib.h>
55 #include <libc.h>
56
57 static DEFINE_NSS_DB_ROOT(db_root);
58
59 void
_nss_initf_netgroup(p)60 _nss_initf_netgroup(p)
61 nss_db_params_t *p;
62 {
63 p->name = NSS_DBNAM_NETGROUP;
64 p->default_config = NSS_DEFCONF_NETGROUP;
65 }
66
67 /*
68 * The netgroup routines aren't quite like the majority of the switch clients.
69 * innetgr() more-or-less fits the getXXXbyYYY mould, but for the others:
70 * - setnetgrent("netgroup") is really a getXXXbyYYY routine, i.e. it
71 * searches the sources until it finds an entry with the given name.
72 * Rather than returning the (potentially large) entry, it simply
73 * initializes a cursor, and then...
74 * - getnetgrent(...) is repeatedly invoked by the user to extract the
75 * contents of the entry found by setnetgrent().
76 * - endnetgrent() is almost like a real endXXXent routine.
77 * The behaviour in NSS was:
78 * If we were certain that all the backends could provide netgroup information
79 * in a common form, we could make the setnetgrent() backend return the entire
80 * entry to the frontend, then implement getnetgrent() and endnetgrent()
81 * strictly in the frontend (aka here). But we're not certain, so we won't.
82 * In NSS2:
83 * Since nscd returns the results, and it is nscd that accumulates
84 * the results, then we can return the entire result on the setnetgrent.
85 *
86 * NOTE:
87 * In the SunOS 4.x (YP) version of this code, innetgr() did not
88 * affect the state of {set,get,end}netgrent(). Somewhere out
89 * there probably lurks a program that depends on this behaviour,
90 * so this version (both frontend and backends) had better
91 * behave the same way.
92 */
93
94 /* ===> ?? fix "__" name */
95 int
__multi_innetgr(ngroup,pgroup,nhost,phost,nuser,puser,ndomain,pdomain)96 __multi_innetgr(ngroup, pgroup,
97 nhost, phost,
98 nuser, puser,
99 ndomain, pdomain)
100 nss_innetgr_argc ngroup, nhost, nuser, ndomain;
101 nss_innetgr_argv pgroup, phost, puser, pdomain;
102 {
103 struct nss_innetgr_args ia;
104
105 if (ngroup == 0) {
106 return (0); /* One thing fewer to worry backends */
107 }
108
109 ia.groups.argc = ngroup;
110 ia.groups.argv = pgroup;
111 ia.arg[NSS_NETGR_MACHINE].argc = nhost;
112 ia.arg[NSS_NETGR_MACHINE].argv = phost;
113 ia.arg[NSS_NETGR_USER].argc = nuser;
114 ia.arg[NSS_NETGR_USER].argv = puser;
115 ia.arg[NSS_NETGR_DOMAIN].argc = ndomain;
116 ia.arg[NSS_NETGR_DOMAIN].argv = pdomain;
117 ia.status = NSS_NETGR_NO;
118
119 (void) nss_search(&db_root, _nss_initf_netgroup,
120 NSS_DBOP_NETGROUP_IN, &ia);
121 return (ia.status == NSS_NETGR_FOUND);
122 }
123
124 int
innetgr(group,host,user,domain)125 innetgr(group, host, user, domain)
126 const char *group, *host, *user, *domain;
127 {
128 #define IA(charp) \
129 (nss_innetgr_argc)((charp) != 0), (nss_innetgr_argv)(&(charp))
130
131 return (__multi_innetgr(IA(group), IA(host), IA(user), IA(domain)));
132 }
133
134 /*
135 * Context for setnetgrent()/getnetgrent(). If the user is being sensible
136 * the requests will be serialized anyway, but let's play safe and
137 * serialize them ourselves (anything to prevent a coredump)...
138 * We can't use lmutex_lock() here because we don't know what the backends
139 * that we call may call in turn. They might call malloc()/free().
140 * So we use the brute-force callout_lock_enter() instead.
141 */
142 static nss_backend_t *getnetgrent_backend;
143
/*
 * Position the {set,get,end}netgrent() cursor at the named netgroup.
 * If a backend is already active from a previous setnetgrent(), first
 * try to re-point it via NSS_DBOP_SETENT; only if that fails is it
 * destroyed and a fresh search through the switch sources performed.
 * The winning backend is cached in getnetgrent_backend for use by
 * getnetgrent_r().  Always returns 0.
 */
int
setnetgrent(const char *netgroup)
{
	nss_backend_t *be;

	if (netgroup == NULL) {
		/* Prevent coredump, otherwise don't do anything profound */
		netgroup = "";
	}

	callout_lock_enter();
	be = getnetgrent_backend;
	if (be != NULL && NSS_INVOKE_DBOP(be, NSS_DBOP_SETENT,
	    (void *)netgroup) != NSS_SUCCESS) {
		/* Existing backend couldn't take this netgroup; discard it */
		(void) NSS_INVOKE_DBOP(be, NSS_DBOP_DESTRUCTOR, 0);
		be = NULL;
	}
	if (be == NULL) {
		struct nss_setnetgrent_args args;

		args.netgroup = netgroup;
		args.iterator = 0;
		/* The source that finds the entry hands back an iterator */
		(void) nss_search(&db_root, _nss_initf_netgroup,
		    NSS_DBOP_NETGROUP_SET, &args);
		be = args.iterator;
	}
	getnetgrent_backend = be;
	callout_lock_exit();
	return (0);
}
174
175 int
getnetgrent_r(machinep,namep,domainp,buffer,buflen)176 getnetgrent_r(machinep, namep, domainp, buffer, buflen)
177 char **machinep;
178 char **namep;
179 char **domainp;
180 char *buffer;
181 int buflen;
182 {
183 struct nss_getnetgrent_args args;
184
185 args.buffer = buffer;
186 args.buflen = buflen;
187 args.status = NSS_NETGR_NO;
188
189 callout_lock_enter();
190 if (getnetgrent_backend != 0) {
191 (void) NSS_INVOKE_DBOP(getnetgrent_backend,
192 NSS_DBOP_GETENT, &args);
193 }
194 callout_lock_exit();
195
196 if (args.status == NSS_NETGR_FOUND) {
197 *machinep = args.retp[NSS_NETGR_MACHINE];
198 *namep = args.retp[NSS_NETGR_USER];
199 *domainp = args.retp[NSS_NETGR_DOMAIN];
200 return (1);
201 } else {
202 return (0);
203 }
204 }
205
/*
 * Scratch buffer for the non-reentrant getnetgrent(): allocated on
 * first use there and released by endnetgrent().
 */
static nss_XbyY_buf_t *buf;
207
208 int
getnetgrent(machinep,namep,domainp)209 getnetgrent(machinep, namep, domainp)
210 char **machinep;
211 char **namep;
212 char **domainp;
213 {
214 (void) NSS_XbyY_ALLOC(&buf, 0, NSS_BUFLEN_NETGROUP);
215 return (getnetgrent_r(machinep, namep, domainp,
216 buf->buffer, buf->buflen));
217 }
218
219 int
endnetgrent()220 endnetgrent()
221 {
222 callout_lock_enter();
223 if (getnetgrent_backend != 0) {
224 (void) NSS_INVOKE_DBOP(getnetgrent_backend,
225 NSS_DBOP_DESTRUCTOR, 0);
226 getnetgrent_backend = 0;
227 }
228 callout_lock_exit();
229 nss_delete(&db_root); /* === ? */
230 NSS_XbyY_FREE(&buf);
231 return (0);
232 }
233