// SPDX-License-Identifier: GPL-2.0-only
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/svcauth.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/sunrpc/addr.h>
#include <linux/err.h>
#include <linux/seq_file.h>
#include <linux/hash.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <linux/kernel.h>
#include <linux/user_namespace.h>
#include <trace/events/sunrpc.h>

#define RPCDBG_FACILITY	RPCDBG_AUTH

#include "netns.h"

/*
 * AUTHUNIX and AUTHNULL credentials are both handled here.
 * AUTHNULL is treated just like AUTHUNIX except that the uid/gid
 * are always nobody (-2).  i.e. we do the same IP address checks for
 * AUTHNULL as for AUTHUNIX, and that is done here.
 */


struct unix_domain {
	struct auth_domain	h;
	/* other stuff later */
};

extern struct auth_ops svcauth_null;
extern struct auth_ops svcauth_unix;
extern struct auth_ops svcauth_tls;

static void svcauth_unix_domain_release_rcu(struct rcu_head *head)
{
	struct auth_domain *dom = container_of(head, struct auth_domain, rcu_head);
	struct unix_domain *ud = container_of(dom, struct unix_domain, h);

	kfree(dom->name);
	kfree(ud);
}

static void svcauth_unix_domain_release(struct auth_domain *dom)
{
	call_rcu(&dom->rcu_head, svcauth_unix_domain_release_rcu);
}

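/**
 * unix_domain_find - look up or create an AUTH_UNIX auth_domain
 * @name: client name to look up
 *
 * Returns a referenced auth_domain with flavour svcauth_unix, or NULL
 * if @name is already registered with a different flavour or if memory
 * allocation fails.  The caller must drop the reference with
 * auth_domain_put().
 */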
struct auth_domain *unix_domain_find(char *name)
{
	struct auth_domain *rv;
	struct unix_domain *new = NULL;

	rv = auth_domain_find(name);
	while(1) {
		if (rv) {
			if (new && rv != &new->h)
				svcauth_unix_domain_release(&new->h);

			if (rv->flavour != &svcauth_unix) {
				auth_domain_put(rv);
				return NULL;
			}
			return rv;
		}

		new = kmalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL)
			return NULL;
		kref_init(&new->h.ref);
		new->h.name = kstrdup(name, GFP_KERNEL);
		if (new->h.name == NULL) {
			kfree(new);
			return NULL;
		}
		new->h.flavour = &svcauth_unix;
		rv = auth_domain_lookup(name, &new->h);
	}
}
EXPORT_SYMBOL_GPL(unix_domain_find);
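
/*
 * Typical usage (illustrative sketch only, not taken from an in-tree
 * caller; "client.example" is a placeholder name):
 *
 *	struct auth_domain *clp;
 *
 *	clp = unix_domain_find("client.example");
 *	if (clp) {
 *		... associate clp with an export or cache entry ...
 *		auth_domain_put(clp);
 *	}
 */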


/**************************************************
 * cache for IP address to unix_domain
 * as needed by AUTH_UNIX
 */
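/*
 * Entries are keyed on (m_class, m_addr) and filled through the
 * "auth.unix.ip" cache channel.  As an illustrative example (addresses
 * and names below are made up), ip_map_request() writes an upcall line
 * of the form
 *
 *	nfsd 192.0.2.53
 *
 * and the reply parsed by ip_map_parse() looks like
 *
 *	nfsd 192.0.2.53 <expiry> clientname
 *
 * where an omitted domainname marks the entry as negative.
 */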
#define	IP_HASHBITS	8
#define	IP_HASHMAX	(1<<IP_HASHBITS)

struct ip_map {
	struct cache_head	h;
	char			m_class[8]; /* e.g. "nfsd" */
	struct in6_addr		m_addr;
	struct unix_domain	*m_client;
	struct rcu_head		m_rcu;
};

static void ip_map_put(struct kref *kref)
{
	struct cache_head *item = container_of(kref, struct cache_head, ref);
	struct ip_map *im = container_of(item, struct ip_map, h);

	if (test_bit(CACHE_VALID, &item->flags) &&
	    !test_bit(CACHE_NEGATIVE, &item->flags))
		auth_domain_put(&im->m_client->h);
	kfree_rcu(im, m_rcu);
}

static inline int hash_ip6(const struct in6_addr *ip)
{
	return hash_32(ipv6_addr_hash(ip), IP_HASHBITS);
}
static int ip_map_match(struct cache_head *corig, struct cache_head *cnew)
{
	struct ip_map *orig = container_of(corig, struct ip_map, h);
	struct ip_map *new = container_of(cnew, struct ip_map, h);
	return strcmp(orig->m_class, new->m_class) == 0 &&
	       ipv6_addr_equal(&orig->m_addr, &new->m_addr);
}
static void ip_map_init(struct cache_head *cnew, struct cache_head *citem)
{
	struct ip_map *new = container_of(cnew, struct ip_map, h);
	struct ip_map *item = container_of(citem, struct ip_map, h);

	strcpy(new->m_class, item->m_class);
	new->m_addr = item->m_addr;
}
static void update(struct cache_head *cnew, struct cache_head *citem)
{
	struct ip_map *new = container_of(cnew, struct ip_map, h);
	struct ip_map *item = container_of(citem, struct ip_map, h);

	kref_get(&item->m_client->h.ref);
	new->m_client = item->m_client;
}
static struct cache_head *ip_map_alloc(void)
{
	struct ip_map *i = kmalloc(sizeof(*i), GFP_KERNEL);
	if (i)
		return &i->h;
	else
		return NULL;
}

static int ip_map_upcall(struct cache_detail *cd, struct cache_head *h)
{
	return sunrpc_cache_pipe_upcall(cd, h);
}

static void ip_map_request(struct cache_detail *cd,
				  struct cache_head *h,
				  char **bpp, int *blen)
{
	char text_addr[40];
	struct ip_map *im = container_of(h, struct ip_map, h);

	if (ipv6_addr_v4mapped(&(im->m_addr))) {
		snprintf(text_addr, 20, "%pI4", &im->m_addr.s6_addr32[3]);
	} else {
		snprintf(text_addr, 40, "%pI6", &im->m_addr);
	}
	qword_add(bpp, blen, im->m_class);
	qword_add(bpp, blen, text_addr);
	(*bpp)[-1] = '\n';
}

static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class, struct in6_addr *addr);
static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, struct unix_domain *udom, time64_t expiry);

static int ip_map_parse(struct cache_detail *cd,
			  char *mesg, int mlen)
{
	/* class ipaddress [domainname] */
	/* should be safe just to use the start of the input buffer
	 * for scratch: */
	char *buf = mesg;
	int len;
	char class[8];
	union {
		struct sockaddr		sa;
		struct sockaddr_in	s4;
		struct sockaddr_in6	s6;
	} address;
	struct sockaddr_in6 sin6;
	int err;

	struct ip_map *ipmp;
	struct auth_domain *dom;
	time64_t expiry;

	if (mesg[mlen-1] != '\n')
		return -EINVAL;
	mesg[mlen-1] = 0;

	/* class */
	len = qword_get(&mesg, class, sizeof(class));
	if (len <= 0) return -EINVAL;

	/* ip address */
	len = qword_get(&mesg, buf, mlen);
	if (len <= 0) return -EINVAL;

	if (rpc_pton(cd->net, buf, len, &address.sa, sizeof(address)) == 0)
		return -EINVAL;
	switch (address.sa.sa_family) {
	case AF_INET:
		/* Form a mapped IPv4 address in sin6 */
		sin6.sin6_family = AF_INET6;
		ipv6_addr_set_v4mapped(address.s4.sin_addr.s_addr,
				&sin6.sin6_addr);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		memcpy(&sin6, &address.s6, sizeof(sin6));
		break;
#endif
	default:
		return -EINVAL;
	}

	err = get_expiry(&mesg, &expiry);
	if (err)
		return err;

	/* domainname, or empty for NEGATIVE */
	len = qword_get(&mesg, buf, mlen);
	if (len < 0) return -EINVAL;

	if (len) {
		dom = unix_domain_find(buf);
		if (dom == NULL)
			return -ENOENT;
	} else
		dom = NULL;

	/* IPv6 scope IDs are ignored for now */
	ipmp = __ip_map_lookup(cd, class, &sin6.sin6_addr);
	if (ipmp) {
		err = __ip_map_update(cd, ipmp,
			     container_of(dom, struct unix_domain, h),
			     expiry);
	} else
		err = -ENOMEM;

	if (dom)
		auth_domain_put(dom);

	cache_flush();
	return err;
}

static int ip_map_show(struct seq_file *m,
		       struct cache_detail *cd,
		       struct cache_head *h)
{
	struct ip_map *im;
	struct in6_addr addr;
	char *dom = "-no-domain-";

	if (h == NULL) {
		seq_puts(m, "#class IP domain\n");
		return 0;
	}
	im = container_of(h, struct ip_map, h);
	/* class addr domain */
	addr = im->m_addr;

	if (test_bit(CACHE_VALID, &h->flags) &&
	    !test_bit(CACHE_NEGATIVE, &h->flags))
		dom = im->m_client->h.name;

	if (ipv6_addr_v4mapped(&addr)) {
		seq_printf(m, "%s %pI4 %s\n",
			im->m_class, &addr.s6_addr32[3], dom);
	} else {
		seq_printf(m, "%s %pI6 %s\n", im->m_class, &addr, dom);
	}
	return 0;
}


static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class,
		struct in6_addr *addr)
{
	struct ip_map ip;
	struct cache_head *ch;

	strcpy(ip.m_class, class);
	ip.m_addr = *addr;
	ch = sunrpc_cache_lookup_rcu(cd, &ip.h,
				     hash_str(class, IP_HASHBITS) ^
				     hash_ip6(addr));

	if (ch)
		return container_of(ch, struct ip_map, h);
	else
		return NULL;
}

static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm,
		struct unix_domain *udom, time64_t expiry)
{
	struct ip_map ip;
	struct cache_head *ch;

	ip.m_client = udom;
	ip.h.flags = 0;
	if (!udom)
		set_bit(CACHE_NEGATIVE, &ip.h.flags);
	ip.h.expiry_time = expiry;
	ch = sunrpc_cache_update(cd, &ip.h, &ipm->h,
				 hash_str(ipm->m_class, IP_HASHBITS) ^
				 hash_ip6(&ipm->m_addr));
	if (!ch)
		return -ENOMEM;
	cache_put(ch, cd);
	return 0;
}

void svcauth_unix_purge(struct net *net)
{
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	cache_purge(sn->ip_map_cache);
}
EXPORT_SYMBOL_GPL(svcauth_unix_purge);

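/*
 * Each transport keeps a reference to the ip_map entry for its peer in
 * xpt_auth_cache, protected by xpt_lock, so the common case avoids a
 * full cache lookup on every request.  The cached entry is checked for
 * expiry before reuse and dropped if it has been invalidated.
 */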
static inline struct ip_map *
ip_map_cached_get(struct svc_xprt *xprt)
{
	struct ip_map *ipm = NULL;
	struct sunrpc_net *sn;

	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
		spin_lock(&xprt->xpt_lock);
		ipm = xprt->xpt_auth_cache;
		if (ipm != NULL) {
			sn = net_generic(xprt->xpt_net, sunrpc_net_id);
			if (cache_is_expired(sn->ip_map_cache, &ipm->h)) {
				/*
				 * The entry has been invalidated since it was
				 * remembered, e.g. by a second mount from the
				 * same IP address.
				 */
				xprt->xpt_auth_cache = NULL;
				spin_unlock(&xprt->xpt_lock);
				cache_put(&ipm->h, sn->ip_map_cache);
				return NULL;
			}
			cache_get(&ipm->h);
		}
		spin_unlock(&xprt->xpt_lock);
	}
	return ipm;
}

static inline void
ip_map_cached_put(struct svc_xprt *xprt, struct ip_map *ipm)
{
	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
		spin_lock(&xprt->xpt_lock);
		if (xprt->xpt_auth_cache == NULL) {
			/* newly cached, keep the reference */
			xprt->xpt_auth_cache = ipm;
			ipm = NULL;
		}
		spin_unlock(&xprt->xpt_lock);
	}
	if (ipm) {
		struct sunrpc_net *sn;

		sn = net_generic(xprt->xpt_net, sunrpc_net_id);
		cache_put(&ipm->h, sn->ip_map_cache);
	}
}

void
svcauth_unix_info_release(struct svc_xprt *xpt)
{
	struct ip_map *ipm;

	ipm = xpt->xpt_auth_cache;
	if (ipm != NULL) {
		struct sunrpc_net *sn;

		sn = net_generic(xpt->xpt_net, sunrpc_net_id);
		cache_put(&ipm->h, sn->ip_map_cache);
	}
}

/****************************************************************************
 * auth.unix.gid cache
 * simple cache to map a UID to a list of GIDs
 * because AUTH_UNIX aka AUTH_SYS carries at most UNX_NGROUPS
 * supplementary groups
 */
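/*
 * Filled through the "auth.unix.gid" cache channel: unix_gid_request()
 * writes the uid being resolved, and the reply parsed by
 * unix_gid_parse() has the form
 *
 *	uid expiry Ngid gid0 gid1 ... gidN-1
 *
 * for example (values are illustrative only)
 *
 *	1000 <expiry> 3 1000 24 27
 */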
#define	GID_HASHBITS	8
#define	GID_HASHMAX	(1<<GID_HASHBITS)

struct unix_gid {
	struct cache_head	h;
	kuid_t			uid;
	struct group_info	*gi;
	struct rcu_head		rcu;
};

static int unix_gid_hash(kuid_t uid)
{
	return hash_long(from_kuid(&init_user_ns, uid), GID_HASHBITS);
}

static void unix_gid_free(struct rcu_head *rcu)
{
	struct unix_gid *ug = container_of(rcu, struct unix_gid, rcu);
	struct cache_head *item = &ug->h;

	if (test_bit(CACHE_VALID, &item->flags) &&
	    !test_bit(CACHE_NEGATIVE, &item->flags))
		put_group_info(ug->gi);
	kfree(ug);
}

static void unix_gid_put(struct kref *kref)
{
	struct cache_head *item = container_of(kref, struct cache_head, ref);
	struct unix_gid *ug = container_of(item, struct unix_gid, h);

	call_rcu(&ug->rcu, unix_gid_free);
}

static int unix_gid_match(struct cache_head *corig, struct cache_head *cnew)
{
	struct unix_gid *orig = container_of(corig, struct unix_gid, h);
	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
	return uid_eq(orig->uid, new->uid);
}
static void unix_gid_init(struct cache_head *cnew, struct cache_head *citem)
{
	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
	struct unix_gid *item = container_of(citem, struct unix_gid, h);
	new->uid = item->uid;
}
static void unix_gid_update(struct cache_head *cnew, struct cache_head *citem)
{
	struct unix_gid *new = container_of(cnew, struct unix_gid, h);
	struct unix_gid *item = container_of(citem, struct unix_gid, h);

	get_group_info(item->gi);
	new->gi = item->gi;
}
static struct cache_head *unix_gid_alloc(void)
{
	struct unix_gid *g = kmalloc(sizeof(*g), GFP_KERNEL);
	if (g)
		return &g->h;
	else
		return NULL;
}

static int unix_gid_upcall(struct cache_detail *cd, struct cache_head *h)
{
	return sunrpc_cache_pipe_upcall_timeout(cd, h);
}

static void unix_gid_request(struct cache_detail *cd,
			     struct cache_head *h,
			     char **bpp, int *blen)
{
	char tuid[20];
	struct unix_gid *ug = container_of(h, struct unix_gid, h);

	snprintf(tuid, 20, "%u", from_kuid(&init_user_ns, ug->uid));
	qword_add(bpp, blen, tuid);
	(*bpp)[-1] = '\n';
}

static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid);

static int unix_gid_parse(struct cache_detail *cd,
			char *mesg, int mlen)
{
	/* uid expiry Ngid gid0 gid1 ... gidN-1 */
	int id;
	kuid_t uid;
	int gids;
	int rv;
	int i;
	int err;
	time64_t expiry;
	struct unix_gid ug, *ugp;

	if (mesg[mlen - 1] != '\n')
		return -EINVAL;
	mesg[mlen-1] = 0;

	rv = get_int(&mesg, &id);
	if (rv)
		return -EINVAL;
	uid = make_kuid(current_user_ns(), id);
	ug.uid = uid;

	err = get_expiry(&mesg, &expiry);
	if (err)
		return err;

	rv = get_int(&mesg, &gids);
	if (rv || gids < 0 || gids > 8192)
		return -EINVAL;

	ug.gi = groups_alloc(gids);
	if (!ug.gi)
		return -ENOMEM;

	for (i = 0 ; i < gids ; i++) {
		int gid;
		kgid_t kgid;
		rv = get_int(&mesg, &gid);
		err = -EINVAL;
		if (rv)
			goto out;
		kgid = make_kgid(current_user_ns(), gid);
		if (!gid_valid(kgid))
			goto out;
		ug.gi->gid[i] = kgid;
	}

	groups_sort(ug.gi);
	ugp = unix_gid_lookup(cd, uid);
	if (ugp) {
		struct cache_head *ch;
		ug.h.flags = 0;
		ug.h.expiry_time = expiry;
		ch = sunrpc_cache_update(cd,
					 &ug.h, &ugp->h,
					 unix_gid_hash(uid));
		if (!ch)
			err = -ENOMEM;
		else {
			err = 0;
			cache_put(ch, cd);
		}
	} else
		err = -ENOMEM;
 out:
	if (ug.gi)
		put_group_info(ug.gi);
	return err;
}

static int unix_gid_show(struct seq_file *m,
			 struct cache_detail *cd,
			 struct cache_head *h)
{
	struct user_namespace *user_ns = m->file->f_cred->user_ns;
	struct unix_gid *ug;
	int i;
	int glen;

	if (h == NULL) {
		seq_puts(m, "#uid cnt: gids...\n");
		return 0;
	}
	ug = container_of(h, struct unix_gid, h);
	if (test_bit(CACHE_VALID, &h->flags) &&
	    !test_bit(CACHE_NEGATIVE, &h->flags))
		glen = ug->gi->ngroups;
	else
		glen = 0;

	seq_printf(m, "%u %d:", from_kuid_munged(user_ns, ug->uid), glen);
	for (i = 0; i < glen; i++)
		seq_printf(m, " %d", from_kgid_munged(user_ns, ug->gi->gid[i]));
	seq_printf(m, "\n");
	return 0;
}

static const struct cache_detail unix_gid_cache_template = {
	.owner		= THIS_MODULE,
	.hash_size	= GID_HASHMAX,
	.name		= "auth.unix.gid",
	.cache_put	= unix_gid_put,
	.cache_upcall	= unix_gid_upcall,
	.cache_request	= unix_gid_request,
	.cache_parse	= unix_gid_parse,
	.cache_show	= unix_gid_show,
	.match		= unix_gid_match,
	.init		= unix_gid_init,
	.update		= unix_gid_update,
	.alloc		= unix_gid_alloc,
};

int unix_gid_cache_create(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd;
	int err;

	cd = cache_create_net(&unix_gid_cache_template, net);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	err = cache_register_net(cd, net);
	if (err) {
		cache_destroy_net(cd, net);
		return err;
	}
	sn->unix_gid_cache = cd;
	return 0;
}

void unix_gid_cache_destroy(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd = sn->unix_gid_cache;

	sn->unix_gid_cache = NULL;
	cache_purge(cd);
	cache_unregister_net(cd, net);
	cache_destroy_net(cd, net);
}

static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid)
{
	struct unix_gid ug;
	struct cache_head *ch;

	ug.uid = uid;
	ch = sunrpc_cache_lookup_rcu(cd, &ug.h, unix_gid_hash(uid));
	if (ch)
		return container_of(ch, struct unix_gid, h);
	else
		return NULL;
}

static struct group_info *unix_gid_find(kuid_t uid, struct svc_rqst *rqstp)
{
	struct unix_gid *ug;
	struct group_info *gi;
	int ret;
	struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net,
					    sunrpc_net_id);

	ug = unix_gid_lookup(sn->unix_gid_cache, uid);
	if (!ug)
		return ERR_PTR(-EAGAIN);
	ret = cache_check(sn->unix_gid_cache, &ug->h, &rqstp->rq_chandle);
	switch (ret) {
	case -ENOENT:
		return ERR_PTR(-ENOENT);
	case -ETIMEDOUT:
		return ERR_PTR(-ESHUTDOWN);
	case 0:
		gi = get_group_info(ug->gi);
		cache_put(&ug->h, sn->unix_gid_cache);
		return gi;
	default:
		return ERR_PTR(-EAGAIN);
	}
}

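/**
 * svcauth_unix_set_client - map a request to its client auth_domain
 * @rqstp: RPC transaction
 *
 * Looks up the client's IP address in the auth.unix.ip cache to find
 * the auth_domain this request belongs to, and, when the auth.unix.gid
 * cache has an entry for the caller's uid, replaces the AUTH_UNIX
 * supplementary group list with the cached one.
 *
 * Return values:
 *   %SVC_OK: client assigned (a NULL procedure needs no client)
 *   %SVC_DENIED: no matching client was found
 *   %SVC_DROP: a cache upcall is in progress; try again later
 *   %SVC_CLOSE: a cache upcall timed out
 */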
enum svc_auth_status
svcauth_unix_set_client(struct svc_rqst *rqstp)
{
	struct sockaddr_in *sin;
	struct sockaddr_in6 *sin6, sin6_storage;
	struct ip_map *ipm;
	struct group_info *gi;
	struct svc_cred *cred = &rqstp->rq_cred;
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct net *net = xprt->xpt_net;
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);

	switch (rqstp->rq_addr.ss_family) {
	case AF_INET:
		sin = svc_addr_in(rqstp);
		sin6 = &sin6_storage;
		ipv6_addr_set_v4mapped(sin->sin_addr.s_addr, &sin6->sin6_addr);
		break;
	case AF_INET6:
		sin6 = svc_addr_in6(rqstp);
		break;
	default:
		BUG();
	}

	rqstp->rq_client = NULL;
	if (rqstp->rq_proc == 0)
		goto out;

	rqstp->rq_auth_stat = rpc_autherr_badcred;
	ipm = ip_map_cached_get(xprt);
	if (ipm == NULL)
		ipm = __ip_map_lookup(sn->ip_map_cache,
				      rqstp->rq_server->sv_programs->pg_class,
				      &sin6->sin6_addr);

	if (ipm == NULL)
		return SVC_DENIED;

	switch (cache_check(sn->ip_map_cache, &ipm->h, &rqstp->rq_chandle)) {
		default:
			BUG();
		case -ETIMEDOUT:
			return SVC_CLOSE;
		case -EAGAIN:
			return SVC_DROP;
		case -ENOENT:
			return SVC_DENIED;
		case 0:
			rqstp->rq_client = &ipm->m_client->h;
			kref_get(&rqstp->rq_client->ref);
			ip_map_cached_put(xprt, ipm);
			break;
	}

	gi = unix_gid_find(cred->cr_uid, rqstp);
	switch (PTR_ERR(gi)) {
	case -EAGAIN:
		return SVC_DROP;
	case -ESHUTDOWN:
		return SVC_CLOSE;
	case -ENOENT:
		break;
	default:
		put_group_info(cred->cr_group_info);
		cred->cr_group_info = gi;
	}

out:
	rqstp->rq_auth_stat = rpc_auth_ok;
	return SVC_OK;
}
EXPORT_SYMBOL_GPL(svcauth_unix_set_client);

/**
 * svcauth_null_accept - Decode and validate incoming RPC_AUTH_NULL credential
 * @rqstp: RPC transaction
 *
 * Return values:
 *   %SVC_OK: Both credential and verifier are valid
 *   %SVC_DENIED: Credential or verifier is not valid
 *   %SVC_GARBAGE: Failed to decode credential or verifier
 *   %SVC_CLOSE: Temporary failure
 *
 * rqstp->rq_auth_stat is set as mandated by RFC 5531.
 */
static enum svc_auth_status
svcauth_null_accept(struct svc_rqst *rqstp)
{
	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
	struct svc_cred	*cred = &rqstp->rq_cred;
	u32 flavor, len;
	void *body;

	/* Length of Call's credential body field: */
	if (xdr_stream_decode_u32(xdr, &len) < 0)
		return SVC_GARBAGE;
	if (len != 0) {
		rqstp->rq_auth_stat = rpc_autherr_badcred;
		return SVC_DENIED;
	}

	/* Call's verf field: */
	if (xdr_stream_decode_opaque_auth(xdr, &flavor, &body, &len) < 0)
		return SVC_GARBAGE;
	if (flavor != RPC_AUTH_NULL || len != 0) {
		rqstp->rq_auth_stat = rpc_autherr_badverf;
		return SVC_DENIED;
	}

	/* Signal that mapping to nobody uid/gid is required */
	cred->cr_uid = INVALID_UID;
	cred->cr_gid = INVALID_GID;
	cred->cr_group_info = groups_alloc(0);
	if (cred->cr_group_info == NULL)
		return SVC_CLOSE; /* kmalloc failure - client must retry */

	if (xdr_stream_encode_opaque_auth(&rqstp->rq_res_stream,
					  RPC_AUTH_NULL, NULL, 0) < 0)
		return SVC_CLOSE;
	if (!svcxdr_set_accept_stat(rqstp))
		return SVC_CLOSE;

	rqstp->rq_cred.cr_flavor = RPC_AUTH_NULL;
	return SVC_OK;
}

static int
svcauth_null_release(struct svc_rqst *rqstp)
{
	if (rqstp->rq_client)
		auth_domain_put(rqstp->rq_client);
	rqstp->rq_client = NULL;
	if (rqstp->rq_cred.cr_group_info)
		put_group_info(rqstp->rq_cred.cr_group_info);
	rqstp->rq_cred.cr_group_info = NULL;

	return 0; /* don't drop */
}


struct auth_ops svcauth_null = {
	.name		= "null",
	.owner		= THIS_MODULE,
	.flavour	= RPC_AUTH_NULL,
	.accept		= svcauth_null_accept,
	.release	= svcauth_null_release,
	.set_client	= svcauth_unix_set_client,
};


/**
 * svcauth_tls_accept - Decode and validate incoming RPC_AUTH_TLS credential
 * @rqstp: RPC transaction
 *
 * Return values:
 *   %SVC_OK: Both credential and verifier are valid
 *   %SVC_DENIED: Credential or verifier is not valid
 *   %SVC_GARBAGE: Failed to decode credential or verifier
 *   %SVC_CLOSE: Temporary failure
 *
 * rqstp->rq_auth_stat is set as mandated by RFC 5531.
 */
static enum svc_auth_status
svcauth_tls_accept(struct svc_rqst *rqstp)
{
	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
	struct svc_cred	*cred = &rqstp->rq_cred;
	struct svc_xprt *xprt = rqstp->rq_xprt;
	u32 flavor, len;
	void *body;
	__be32 *p;

	/* Length of Call's credential body field: */
	if (xdr_stream_decode_u32(xdr, &len) < 0)
		return SVC_GARBAGE;
	if (len != 0) {
		rqstp->rq_auth_stat = rpc_autherr_badcred;
		return SVC_DENIED;
	}

	/* Call's verf field: */
	if (xdr_stream_decode_opaque_auth(xdr, &flavor, &body, &len) < 0)
		return SVC_GARBAGE;
	if (flavor != RPC_AUTH_NULL || len != 0) {
		rqstp->rq_auth_stat = rpc_autherr_badverf;
		return SVC_DENIED;
	}

	/* AUTH_TLS is not valid on non-NULL procedures */
	if (rqstp->rq_proc != 0) {
		rqstp->rq_auth_stat = rpc_autherr_badcred;
		return SVC_DENIED;
	}

	/* Signal that mapping to nobody uid/gid is required */
	cred->cr_uid = INVALID_UID;
	cred->cr_gid = INVALID_GID;
	cred->cr_group_info = groups_alloc(0);
	if (cred->cr_group_info == NULL)
		return SVC_CLOSE;

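	/*
	 * Per RFC 9289, the reply to an AUTH_TLS NULL probe carries an
	 * AUTH_NONE verifier whose body is the 8-octet "STARTTLS" token
	 * when the transport implements xpo_handshake; otherwise the
	 * verifier is left empty.
	 */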
	if (xprt->xpt_ops->xpo_handshake) {
		p = xdr_reserve_space(&rqstp->rq_res_stream, XDR_UNIT * 2 + 8);
		if (!p)
			return SVC_CLOSE;
		trace_svc_tls_start(xprt);
		*p++ = rpc_auth_null;
		*p++ = cpu_to_be32(8);
		memcpy(p, "STARTTLS", 8);

		set_bit(XPT_HANDSHAKE, &xprt->xpt_flags);
		svc_xprt_enqueue(xprt);
	} else {
		trace_svc_tls_unavailable(xprt);
		if (xdr_stream_encode_opaque_auth(&rqstp->rq_res_stream,
						  RPC_AUTH_NULL, NULL, 0) < 0)
			return SVC_CLOSE;
	}
	if (!svcxdr_set_accept_stat(rqstp))
		return SVC_CLOSE;

	rqstp->rq_cred.cr_flavor = RPC_AUTH_TLS;
	return SVC_OK;
}

struct auth_ops svcauth_tls = {
	.name		= "tls",
	.owner		= THIS_MODULE,
	.flavour	= RPC_AUTH_TLS,
	.accept		= svcauth_tls_accept,
	.release	= svcauth_null_release,
	.set_client	= svcauth_unix_set_client,
};


/**
 * svcauth_unix_accept - Decode and validate incoming RPC_AUTH_SYS credential
 * @rqstp: RPC transaction
 *
 * Return values:
 *   %SVC_OK: Both credential and verifier are valid
 *   %SVC_DENIED: Credential or verifier is not valid
 *   %SVC_GARBAGE: Failed to decode credential or verifier
 *   %SVC_CLOSE: Temporary failure
 *
 * rqstp->rq_auth_stat is set as mandated by RFC 5531.
 */
static enum svc_auth_status
svcauth_unix_accept(struct svc_rqst *rqstp)
{
	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
	struct svc_cred	*cred = &rqstp->rq_cred;
	struct user_namespace *userns;
	u32 flavor, len, i;
	void *body;
	__be32 *p;

	/*
	 * This implementation ignores the length of the Call's
	 * credential body field and the timestamp and machinename
	 * fields.
	 */
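	/*
	 * For reference, the AUTH_SYS credential body (RFC 5531,
	 * Appendix A) is laid out as
	 *
	 *	unsigned int stamp;
	 *	string machinename<255>;
	 *	unsigned int uid;
	 *	unsigned int gid;
	 *	unsigned int gids<16>;
	 *
	 * so the three XDR words decoded below are the credential body
	 * length, the stamp, and the machinename length.
	 */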
	p = xdr_inline_decode(xdr, XDR_UNIT * 3);
	if (!p)
		return SVC_GARBAGE;
	len = be32_to_cpup(p + 2);
	if (len > RPC_MAX_MACHINENAME)
		return SVC_GARBAGE;
	if (!xdr_inline_decode(xdr, len))
		return SVC_GARBAGE;

	/*
	 * Note: we skip uid_valid()/gid_valid() checks here for
	 * backwards compatibility with clients that use -1 id's.
	 * Instead, -1 uid or gid is later mapped to the
	 * (export-specific) anonymous id by nfsd_setuser.
	 * Supplementary gid's will be left alone.
	 */
	userns = (rqstp->rq_xprt && rqstp->rq_xprt->xpt_cred) ?
		rqstp->rq_xprt->xpt_cred->user_ns : &init_user_ns;
	if (xdr_stream_decode_u32(xdr, &i) < 0)
		return SVC_GARBAGE;
	cred->cr_uid = make_kuid(userns, i);
	if (xdr_stream_decode_u32(xdr, &i) < 0)
		return SVC_GARBAGE;
	cred->cr_gid = make_kgid(userns, i);

	if (xdr_stream_decode_u32(xdr, &len) < 0)
		return SVC_GARBAGE;
	if (len > UNX_NGROUPS)
		goto badcred;
	p = xdr_inline_decode(xdr, XDR_UNIT * len);
	if (!p)
		return SVC_GARBAGE;
	cred->cr_group_info = groups_alloc(len);
	if (cred->cr_group_info == NULL)
		return SVC_CLOSE;
	for (i = 0; i < len; i++) {
		kgid_t kgid = make_kgid(userns, be32_to_cpup(p++));
		cred->cr_group_info->gid[i] = kgid;
	}
	groups_sort(cred->cr_group_info);

	/* Call's verf field: */
	if (xdr_stream_decode_opaque_auth(xdr, &flavor, &body, &len) < 0)
		return SVC_GARBAGE;
	if (flavor != RPC_AUTH_NULL || len != 0) {
		rqstp->rq_auth_stat = rpc_autherr_badverf;
		return SVC_DENIED;
	}

	if (xdr_stream_encode_opaque_auth(&rqstp->rq_res_stream,
					  RPC_AUTH_NULL, NULL, 0) < 0)
		return SVC_CLOSE;
	if (!svcxdr_set_accept_stat(rqstp))
		return SVC_CLOSE;

	rqstp->rq_cred.cr_flavor = RPC_AUTH_UNIX;
	return SVC_OK;

badcred:
	rqstp->rq_auth_stat = rpc_autherr_badcred;
	return SVC_DENIED;
}

static int
svcauth_unix_release(struct svc_rqst *rqstp)
{
	/* Verifier (such as it is) is already in place.
	 */
	if (rqstp->rq_client)
		auth_domain_put(rqstp->rq_client);
	rqstp->rq_client = NULL;
	if (rqstp->rq_cred.cr_group_info)
		put_group_info(rqstp->rq_cred.cr_group_info);
	rqstp->rq_cred.cr_group_info = NULL;

	return 0;
}


struct auth_ops svcauth_unix = {
	.name		= "unix",
	.owner		= THIS_MODULE,
	.flavour	= RPC_AUTH_UNIX,
	.accept		= svcauth_unix_accept,
	.release	= svcauth_unix_release,
	.domain_release	= svcauth_unix_domain_release,
	.set_client	= svcauth_unix_set_client,
};

static const struct cache_detail ip_map_cache_template = {
	.owner		= THIS_MODULE,
	.hash_size	= IP_HASHMAX,
	.name		= "auth.unix.ip",
	.cache_put	= ip_map_put,
	.cache_upcall	= ip_map_upcall,
	.cache_request	= ip_map_request,
	.cache_parse	= ip_map_parse,
	.cache_show	= ip_map_show,
	.match		= ip_map_match,
	.init		= ip_map_init,
	.update		= update,
	.alloc		= ip_map_alloc,
};

int ip_map_cache_create(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd;
	int err;

	cd = cache_create_net(&ip_map_cache_template, net);
	if (IS_ERR(cd))
		return PTR_ERR(cd);
	err = cache_register_net(cd, net);
	if (err) {
		cache_destroy_net(cd, net);
		return err;
	}
	sn->ip_map_cache = cd;
	return 0;
}

void ip_map_cache_destroy(struct net *net)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct cache_detail *cd = sn->ip_map_cache;

	sn->ip_map_cache = NULL;
	cache_purge(cd);
	cache_unregister_net(cd, net);
	cache_destroy_net(cd, net);
}