xref: /linux/net/ipv6/ip6_flowlabel.c (revision bfd5bb6f90af092aa345b15cd78143956a13c2a8)
/*
 *	ip6_flowlabel.c		IPv6 flowlabel manager.
 *
 *	This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *	Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/pid_namespace.h>

#include <net/net_namespace.h>
#include <net/sock.h>

#include <net/ipv6.h>
#include <net/rawv6.h>
#include <net/transp_v6.h>

#include <linux/uaccess.h>

#define FL_MIN_LINGER	6	/* Minimal linger. It is set to the 6 sec
				   specified in the old IPv6 RFC. Well, it
				   was a reasonable value.
				 */
#define FL_MAX_LINGER	150	/* Maximal linger timeout */

/* FL hash table */

#define FL_MAX_PER_SOCK	32
#define FL_MAX_SIZE	4096
#define FL_HASH_MASK	255
#define FL_HASH(l)	(ntohl(l)&FL_HASH_MASK)
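/* Editorial note (not in the original source): FL_HASH() buckets on the
 * low 8 bits of the host-order label, so e.g. a label sent down as
 * htonl(0x12345) lands in bucket 0x12345 & FL_HASH_MASK == 0x45.
 */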

static atomic_t fl_size = ATOMIC_INIT(0);
static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1];

static void ip6_fl_gc(struct timer_list *unused);
static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc);

/* FL hash table lock: it protects only the GC */

static DEFINE_SPINLOCK(ip6_fl_lock);

/* Lock for the per-socket flowlabel lists */

static DEFINE_SPINLOCK(ip6_sk_fl_lock);

#define for_each_fl_rcu(hash, fl)				\
	for (fl = rcu_dereference_bh(fl_ht[(hash)]);		\
	     fl != NULL;					\
	     fl = rcu_dereference_bh(fl->next))
#define for_each_fl_continue_rcu(fl)				\
	for (fl = rcu_dereference_bh(fl->next);			\
	     fl != NULL;					\
	     fl = rcu_dereference_bh(fl->next))

#define for_each_sk_fl_rcu(np, sfl)				\
	for (sfl = rcu_dereference_bh(np->ipv6_fl_list);	\
	     sfl != NULL;					\
	     sfl = rcu_dereference_bh(sfl->next))

static inline struct ip6_flowlabel *__fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	for_each_fl_rcu(FL_HASH(label), fl) {
		if (fl->label == label && net_eq(fl->fl_net, net))
			return fl;
	}
	return NULL;
}

static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	rcu_read_lock_bh();
	fl = __fl_lookup(net, label);
	if (fl && !atomic_inc_not_zero(&fl->users))
		fl = NULL;
	rcu_read_unlock_bh();
	return fl;
}
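/* Editorial note: atomic_inc_not_zero() is the standard RCU refcount
 * pattern; once fl->users has hit zero, the GC is free to unlink the
 * entry and hand it to kfree_rcu() at any moment, so a concurrent
 * reader must fail the lookup rather than resurrect the count from zero.
 */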

static void fl_free(struct ip6_flowlabel *fl)
{
	if (fl) {
		if (fl->share == IPV6_FL_S_PROCESS)
			put_pid(fl->owner.pid);
		kfree(fl->opt);
		kfree_rcu(fl, rcu);
	}
}

static void fl_release(struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_fl_lock);

	fl->lastuse = jiffies;
	if (atomic_dec_and_test(&fl->users)) {
		unsigned long ttd = fl->lastuse + fl->linger;
		if (time_after(ttd, fl->expires))
			fl->expires = ttd;
		ttd = fl->expires;
		if (fl->opt && fl->share == IPV6_FL_S_EXCL) {
			struct ipv6_txoptions *opt = fl->opt;
			fl->opt = NULL;
			kfree(opt);
		}
		if (!timer_pending(&ip6_fl_gc_timer) ||
		    time_after(ip6_fl_gc_timer.expires, ttd))
			mod_timer(&ip6_fl_gc_timer, ttd);
	}
	spin_unlock_bh(&ip6_fl_lock);
}

static void ip6_fl_gc(struct timer_list *unused)
{
	int i;
	unsigned long now = jiffies;
	unsigned long sched = 0;

	spin_lock(&ip6_fl_lock);

	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl;
		struct ip6_flowlabel __rcu **flp;

		flp = &fl_ht[i];
		while ((fl = rcu_dereference_protected(*flp,
						       lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (atomic_read(&fl->users) == 0) {
				unsigned long ttd = fl->lastuse + fl->linger;
				if (time_after(ttd, fl->expires))
					fl->expires = ttd;
				ttd = fl->expires;
				if (time_after_eq(now, ttd)) {
					*flp = fl->next;
					fl_free(fl);
					atomic_dec(&fl_size);
					continue;
				}
				if (!sched || time_before(ttd, sched))
					sched = ttd;
			}
			flp = &fl->next;
		}
	}
	if (!sched && atomic_read(&fl_size))
		sched = now + FL_MAX_LINGER;
	if (sched) {
		mod_timer(&ip6_fl_gc_timer, sched);
	}
	spin_unlock(&ip6_fl_lock);
}

static void __net_exit ip6_fl_purge(struct net *net)
{
	int i;

	spin_lock_bh(&ip6_fl_lock);
	for (i = 0; i <= FL_HASH_MASK; i++) {
		struct ip6_flowlabel *fl;
		struct ip6_flowlabel __rcu **flp;

		flp = &fl_ht[i];
		while ((fl = rcu_dereference_protected(*flp,
						       lockdep_is_held(&ip6_fl_lock))) != NULL) {
			if (net_eq(fl->fl_net, net) &&
			    atomic_read(&fl->users) == 0) {
				*flp = fl->next;
				fl_free(fl);
				atomic_dec(&fl_size);
				continue;
			}
			flp = &fl->next;
		}
	}
	spin_unlock_bh(&ip6_fl_lock);
}

static struct ip6_flowlabel *fl_intern(struct net *net,
				       struct ip6_flowlabel *fl, __be32 label)
{
	struct ip6_flowlabel *lfl;

	fl->label = label & IPV6_FLOWLABEL_MASK;

	spin_lock_bh(&ip6_fl_lock);
	if (label == 0) {
		for (;;) {
			fl->label = htonl(prandom_u32())&IPV6_FLOWLABEL_MASK;
			if (fl->label) {
				lfl = __fl_lookup(net, fl->label);
				if (!lfl)
					break;
			}
		}
	} else {
		/*
		 * we dropped the ip6_fl_lock, so this entry could reappear
		 * and we need to recheck with it.
		 *
		 * OTOH no need to search the active socket first, like it is
		 * done in ipv6_flowlabel_opt - sock is locked, so new entry
		 * with the same label can only appear on another sock
		 */
		lfl = __fl_lookup(net, fl->label);
		if (lfl) {
			atomic_inc(&lfl->users);
			spin_unlock_bh(&ip6_fl_lock);
			return lfl;
		}
	}

	fl->lastuse = jiffies;
	fl->next = fl_ht[FL_HASH(fl->label)];
	rcu_assign_pointer(fl_ht[FL_HASH(fl->label)], fl);
	atomic_inc(&fl_size);
	spin_unlock_bh(&ip6_fl_lock);
	return NULL;
}

/* Socket flowlabel lists */

struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
{
	struct ipv6_fl_socklist *sfl;
	struct ipv6_pinfo *np = inet6_sk(sk);

	label &= IPV6_FLOWLABEL_MASK;

	rcu_read_lock_bh();
	for_each_sk_fl_rcu(np, sfl) {
		struct ip6_flowlabel *fl = sfl->fl;
		if (fl->label == label) {
			fl->lastuse = jiffies;
			atomic_inc(&fl->users);
			rcu_read_unlock_bh();
			return fl;
		}
	}
	rcu_read_unlock_bh();
	return NULL;
}
EXPORT_SYMBOL_GPL(fl6_sock_lookup);

void fl6_free_socklist(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;

	if (!rcu_access_pointer(np->ipv6_fl_list))
		return;

	spin_lock_bh(&ip6_sk_fl_lock);
	while ((sfl = rcu_dereference_protected(np->ipv6_fl_list,
						lockdep_is_held(&ip6_sk_fl_lock))) != NULL) {
		np->ipv6_fl_list = sfl->next;
		spin_unlock_bh(&ip6_sk_fl_lock);

		fl_release(sfl->fl);
		kfree_rcu(sfl, rcu);

		spin_lock_bh(&ip6_sk_fl_lock);
	}
	spin_unlock_bh(&ip6_sk_fl_lock);
}

/* Service routines */

/*
   This is the only difficult place: a flow label enforces identical headers
   up to and including the routing header, but the user may still supply
   per-packet options that follow the rthdr.
 */

struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
					 struct ip6_flowlabel *fl,
					 struct ipv6_txoptions *fopt)
{
	struct ipv6_txoptions *fl_opt = fl->opt;

	if (!fopt || fopt->opt_flen == 0)
		return fl_opt;

	if (fl_opt) {
		opt_space->hopopt = fl_opt->hopopt;
		opt_space->dst0opt = fl_opt->dst0opt;
		opt_space->srcrt = fl_opt->srcrt;
		opt_space->opt_nflen = fl_opt->opt_nflen;
	} else {
		if (fopt->opt_nflen == 0)
			return fopt;
		opt_space->hopopt = NULL;
		opt_space->dst0opt = NULL;
		opt_space->srcrt = NULL;
		opt_space->opt_nflen = 0;
	}
	opt_space->dst1opt = fopt->dst1opt;
	opt_space->opt_flen = fopt->opt_flen;
	opt_space->tot_len = fopt->tot_len;
	return opt_space;
}
EXPORT_SYMBOL_GPL(fl6_merge_options);
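
/* Editorial sketch (not part of the original file): the expected calling
 * pattern, modelled on the ipv6 datagram send path.  The caller supplies
 * scratch space that must outlive any use of the returned pointer; the
 * helper name below is illustrative only.
 */
#if 0
static struct ipv6_txoptions *fl6_merge_example(struct ip6_flowlabel *flowlabel,
						struct ipv6_txoptions *opt,
						struct ipv6_txoptions *opt_space)
{
	/* Returns opt, flowlabel->opt, or opt_space with the flow label's
	 * hopopt/dst0opt/srcrt combined with the per-packet dst1opt.
	 */
	if (flowlabel)
		opt = fl6_merge_options(opt_space, flowlabel, opt);
	return opt;
}
#endif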

static unsigned long check_linger(unsigned long ttl)
{
	if (ttl < FL_MIN_LINGER)
		return FL_MIN_LINGER*HZ;
	if (ttl > FL_MAX_LINGER && !capable(CAP_NET_ADMIN))
		return 0;
	return ttl*HZ;
}

static int fl6_renew(struct ip6_flowlabel *fl, unsigned long linger, unsigned long expires)
{
	linger = check_linger(linger);
	if (!linger)
		return -EPERM;
	expires = check_linger(expires);
	if (!expires)
		return -EPERM;

	spin_lock_bh(&ip6_fl_lock);
	fl->lastuse = jiffies;
	if (time_before(fl->linger, linger))
		fl->linger = linger;
	if (time_before(expires, fl->linger))
		expires = fl->linger;
	if (time_before(fl->expires, fl->lastuse + expires))
		fl->expires = fl->lastuse + expires;
	spin_unlock_bh(&ip6_fl_lock);

	return 0;
}
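
/* Editorial worked example: with HZ=1000, a request of flr_linger=0 and
 * flr_expires=60 becomes linger = FL_MIN_LINGER*HZ = 6000 jiffies and
 * expires = 60*HZ = 60000 jiffies, so the label survives at least 60 s
 * past "now"; values above FL_MAX_LINGER (150 s) need CAP_NET_ADMIN.
 */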

static struct ip6_flowlabel *
fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
	  char __user *optval, int optlen, int *err_p)
{
	struct ip6_flowlabel *fl = NULL;
	int olen;
	int addr_type;
	int err;

	olen = optlen - CMSG_ALIGN(sizeof(*freq));
	err = -EINVAL;
	if (olen > 64 * 1024)
		goto done;

	err = -ENOMEM;
	fl = kzalloc(sizeof(*fl), GFP_KERNEL);
	if (!fl)
		goto done;

	if (olen > 0) {
		struct msghdr msg;
		struct flowi6 flowi6;
		struct sockcm_cookie sockc_junk;
		struct ipcm6_cookie ipc6;

		err = -ENOMEM;
		fl->opt = kmalloc(sizeof(*fl->opt) + olen, GFP_KERNEL);
		if (!fl->opt)
			goto done;

		memset(fl->opt, 0, sizeof(*fl->opt));
		fl->opt->tot_len = sizeof(*fl->opt) + olen;
		err = -EFAULT;
		if (copy_from_user(fl->opt+1, optval+CMSG_ALIGN(sizeof(*freq)), olen))
			goto done;

		msg.msg_controllen = olen;
		msg.msg_control = (void *)(fl->opt+1);
		memset(&flowi6, 0, sizeof(flowi6));

		ipc6.opt = fl->opt;
		err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, &ipc6, &sockc_junk);
		if (err)
			goto done;
		err = -EINVAL;
		if (fl->opt->opt_flen)
			goto done;
		if (fl->opt->opt_nflen == 0) {
			kfree(fl->opt);
			fl->opt = NULL;
		}
	}

	fl->fl_net = net;
	fl->expires = jiffies;
	err = fl6_renew(fl, freq->flr_linger, freq->flr_expires);
	if (err)
		goto done;
	fl->share = freq->flr_share;
	addr_type = ipv6_addr_type(&freq->flr_dst);
	if ((addr_type & IPV6_ADDR_MAPPED) ||
	    addr_type == IPV6_ADDR_ANY) {
		err = -EINVAL;
		goto done;
	}
	fl->dst = freq->flr_dst;
	atomic_set(&fl->users, 1);
	switch (fl->share) {
	case IPV6_FL_S_EXCL:
	case IPV6_FL_S_ANY:
		break;
	case IPV6_FL_S_PROCESS:
		fl->owner.pid = get_task_pid(current, PIDTYPE_PID);
		break;
	case IPV6_FL_S_USER:
		fl->owner.uid = current_euid();
		break;
	default:
		err = -EINVAL;
		goto done;
	}
	return fl;

done:
	fl_free(fl);
	*err_p = err;
	return NULL;
}

static int mem_check(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;
	int room = FL_MAX_SIZE - atomic_read(&fl_size);
	int count = 0;

	if (room > FL_MAX_SIZE - FL_MAX_PER_SOCK)
		return 0;

	rcu_read_lock_bh();
	for_each_sk_fl_rcu(np, sfl)
		count++;
	rcu_read_unlock_bh();

	if (room <= 0 ||
	    ((count >= FL_MAX_PER_SOCK ||
	      (count > 0 && room < FL_MAX_SIZE/2) || room < FL_MAX_SIZE/4) &&
	     !capable(CAP_NET_ADMIN)))
		return -ENOBUFS;

	return 0;
}

static inline void fl_link(struct ipv6_pinfo *np, struct ipv6_fl_socklist *sfl,
		struct ip6_flowlabel *fl)
{
	spin_lock_bh(&ip6_sk_fl_lock);
	sfl->fl = fl;
	sfl->next = np->ipv6_fl_list;
	rcu_assign_pointer(np->ipv6_fl_list, sfl);
	spin_unlock_bh(&ip6_sk_fl_lock);
}

int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
			   int flags)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_fl_socklist *sfl;

	if (flags & IPV6_FL_F_REMOTE) {
		freq->flr_label = np->rcv_flowinfo & IPV6_FLOWLABEL_MASK;
		return 0;
	}

	if (np->repflow) {
		freq->flr_label = np->flow_label;
		return 0;
	}

	rcu_read_lock_bh();

	for_each_sk_fl_rcu(np, sfl) {
		if (sfl->fl->label == (np->flow_label & IPV6_FLOWLABEL_MASK)) {
			spin_lock_bh(&ip6_fl_lock);
			freq->flr_label = sfl->fl->label;
			freq->flr_dst = sfl->fl->dst;
			freq->flr_share = sfl->fl->share;
			freq->flr_expires = (sfl->fl->expires - jiffies) / HZ;
			freq->flr_linger = sfl->fl->linger / HZ;

			spin_unlock_bh(&ip6_fl_lock);
			rcu_read_unlock_bh();
			return 0;
		}
	}
	rcu_read_unlock_bh();

	return -ENOENT;
}

int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
{
	int uninitialized_var(err);
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_flowlabel_req freq;
	struct ipv6_fl_socklist *sfl1 = NULL;
	struct ipv6_fl_socklist *sfl;
	struct ipv6_fl_socklist __rcu **sflp;
	struct ip6_flowlabel *fl, *fl1 = NULL;

	if (optlen < sizeof(freq))
		return -EINVAL;

	if (copy_from_user(&freq, optval, sizeof(freq)))
		return -EFAULT;

	switch (freq.flr_action) {
	case IPV6_FL_A_PUT:
		if (freq.flr_flags & IPV6_FL_F_REFLECT) {
			if (sk->sk_protocol != IPPROTO_TCP)
				return -ENOPROTOOPT;
			if (!np->repflow)
				return -ESRCH;
			np->flow_label = 0;
			np->repflow = 0;
			return 0;
		}
		spin_lock_bh(&ip6_sk_fl_lock);
		for (sflp = &np->ipv6_fl_list;
		     (sfl = rcu_dereference_protected(*sflp,
						      lockdep_is_held(&ip6_sk_fl_lock))) != NULL;
		     sflp = &sfl->next) {
			if (sfl->fl->label == freq.flr_label) {
				if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
					np->flow_label &= ~IPV6_FLOWLABEL_MASK;
				*sflp = sfl->next;
				spin_unlock_bh(&ip6_sk_fl_lock);
				fl_release(sfl->fl);
				kfree_rcu(sfl, rcu);
				return 0;
			}
		}
		spin_unlock_bh(&ip6_sk_fl_lock);
		return -ESRCH;

	case IPV6_FL_A_RENEW:
		rcu_read_lock_bh();
		for_each_sk_fl_rcu(np, sfl) {
			if (sfl->fl->label == freq.flr_label) {
				err = fl6_renew(sfl->fl, freq.flr_linger, freq.flr_expires);
				rcu_read_unlock_bh();
				return err;
			}
		}
		rcu_read_unlock_bh();

		if (freq.flr_share == IPV6_FL_S_NONE &&
		    ns_capable(net->user_ns, CAP_NET_ADMIN)) {
			fl = fl_lookup(net, freq.flr_label);
			if (fl) {
				err = fl6_renew(fl, freq.flr_linger, freq.flr_expires);
				fl_release(fl);
				return err;
			}
		}
		return -ESRCH;

	case IPV6_FL_A_GET:
		if (freq.flr_flags & IPV6_FL_F_REFLECT) {
			struct net *net = sock_net(sk);
			if (net->ipv6.sysctl.flowlabel_consistency) {
				net_info_ratelimited("Cannot set IPV6_FL_F_REFLECT if the flowlabel_consistency sysctl is enabled\n");
				return -EPERM;
			}

			if (sk->sk_protocol != IPPROTO_TCP)
				return -ENOPROTOOPT;

			np->repflow = 1;
			return 0;
		}

		if (freq.flr_label & ~IPV6_FLOWLABEL_MASK)
			return -EINVAL;

		if (net->ipv6.sysctl.flowlabel_state_ranges &&
		    (freq.flr_label & IPV6_FLOWLABEL_STATELESS_FLAG))
			return -ERANGE;

		fl = fl_create(net, sk, &freq, optval, optlen, &err);
		if (!fl)
			return err;
		sfl1 = kmalloc(sizeof(*sfl1), GFP_KERNEL);

		if (freq.flr_label) {
			err = -EEXIST;
			rcu_read_lock_bh();
			for_each_sk_fl_rcu(np, sfl) {
				if (sfl->fl->label == freq.flr_label) {
					if (freq.flr_flags&IPV6_FL_F_EXCL) {
						rcu_read_unlock_bh();
						goto done;
					}
					fl1 = sfl->fl;
					atomic_inc(&fl1->users);
					break;
				}
			}
			rcu_read_unlock_bh();

			if (!fl1)
				fl1 = fl_lookup(net, freq.flr_label);
			if (fl1) {
recheck:
				err = -EEXIST;
				if (freq.flr_flags&IPV6_FL_F_EXCL)
					goto release;
				err = -EPERM;
				if (fl1->share == IPV6_FL_S_EXCL ||
				    fl1->share != fl->share ||
				    ((fl1->share == IPV6_FL_S_PROCESS) &&
				     (fl1->owner.pid != fl->owner.pid)) ||
				    ((fl1->share == IPV6_FL_S_USER) &&
				     !uid_eq(fl1->owner.uid, fl->owner.uid)))
					goto release;

				err = -ENOMEM;
				if (!sfl1)
					goto release;
				if (fl->linger > fl1->linger)
					fl1->linger = fl->linger;
				if ((long)(fl->expires - fl1->expires) > 0)
					fl1->expires = fl->expires;
				fl_link(np, sfl1, fl1);
				fl_free(fl);
				return 0;

release:
				fl_release(fl1);
				goto done;
			}
		}
		err = -ENOENT;
		if (!(freq.flr_flags&IPV6_FL_F_CREATE))
			goto done;

		err = -ENOMEM;
		if (!sfl1)
			goto done;

		err = mem_check(sk);
		if (err != 0)
			goto done;

		fl1 = fl_intern(net, fl, freq.flr_label);
		if (fl1)
			goto recheck;

		if (!freq.flr_label) {
			if (copy_to_user(&((struct in6_flowlabel_req __user *) optval)->flr_label,
					 &fl->label, sizeof(fl->label))) {
				/* Intentionally ignore fault. */
			}
		}

		fl_link(np, sfl1, fl);
		return 0;

	default:
		return -EINVAL;
	}

done:
	fl_free(fl);
	kfree(sfl1);
	return err;
}
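
/* Editorial sketch (userspace, not part of this file): the setsockopt()
 * interface implemented above.  A hedged minimal example that asks the
 * kernel to allocate a fresh exclusive label; the helper name is
 * illustrative and error handling is trimmed.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/in6.h>	/* struct in6_flowlabel_req, IPV6_FL_*, IPV6_FLOWLABEL_MGR */

static int get_flowlabel(int fd, const struct in6_addr *dst)
{
	struct in6_flowlabel_req freq;

	memset(&freq, 0, sizeof(freq));
	freq.flr_dst = *dst;
	freq.flr_label = 0;			/* 0: let fl_intern() pick one */
	freq.flr_action = IPV6_FL_A_GET;
	freq.flr_share = IPV6_FL_S_EXCL;
	freq.flr_flags = IPV6_FL_F_CREATE | IPV6_FL_F_EXCL;

	if (setsockopt(fd, IPPROTO_IPV6, IPV6_FLOWLABEL_MGR,
		       &freq, sizeof(freq)) < 0)
		return -1;

	/* On success the kernel copied the chosen label back into
	 * freq.flr_label (see the copy_to_user() above).
	 */
	return 0;
}
#endif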

#ifdef CONFIG_PROC_FS

struct ip6fl_iter_state {
	struct seq_net_private p;
	struct pid_namespace *pid_ns;
	int bucket;
};

#define ip6fl_seq_private(seq)	((struct ip6fl_iter_state *)(seq)->private)

static struct ip6_flowlabel *ip6fl_get_first(struct seq_file *seq)
{
	struct ip6_flowlabel *fl = NULL;
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for (state->bucket = 0; state->bucket <= FL_HASH_MASK; ++state->bucket) {
		for_each_fl_rcu(state->bucket, fl) {
			if (net_eq(fl->fl_net, net))
				goto out;
		}
	}
	fl = NULL;
out:
	return fl;
}

static struct ip6_flowlabel *ip6fl_get_next(struct seq_file *seq, struct ip6_flowlabel *fl)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	struct net *net = seq_file_net(seq);

	for_each_fl_continue_rcu(fl) {
		if (net_eq(fl->fl_net, net))
			goto out;
	}

try_again:
	if (++state->bucket <= FL_HASH_MASK) {
		for_each_fl_rcu(state->bucket, fl) {
			if (net_eq(fl->fl_net, net))
				goto out;
		}
		goto try_again;
	}
	fl = NULL;

out:
	return fl;
}

static struct ip6_flowlabel *ip6fl_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip6_flowlabel *fl = ip6fl_get_first(seq);
	if (fl)
		while (pos && (fl = ip6fl_get_next(seq, fl)) != NULL)
			--pos;
	return pos ? NULL : fl;
}

static void *ip6fl_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);

	state->pid_ns = proc_pid_ns(file_inode(seq->file));

	rcu_read_lock_bh();
	return *pos ? ip6fl_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *ip6fl_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip6_flowlabel *fl;

	if (v == SEQ_START_TOKEN)
		fl = ip6fl_get_first(seq);
	else
		fl = ip6fl_get_next(seq, v);
	++*pos;
	return fl;
}

static void ip6fl_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock_bh();
}

static int ip6fl_seq_show(struct seq_file *seq, void *v)
{
	struct ip6fl_iter_state *state = ip6fl_seq_private(seq);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "Label S Owner  Users  Linger Expires  Dst                              Opt\n");
	} else {
		struct ip6_flowlabel *fl = v;
		seq_printf(seq,
			   "%05X %-1d %-6d %-6d %-6ld %-8ld %pi6 %-4d\n",
			   (unsigned int)ntohl(fl->label),
			   fl->share,
			   ((fl->share == IPV6_FL_S_PROCESS) ?
			    pid_nr_ns(fl->owner.pid, state->pid_ns) :
			    ((fl->share == IPV6_FL_S_USER) ?
			     from_kuid_munged(seq_user_ns(seq), fl->owner.uid) :
			     0)),
			   atomic_read(&fl->users),
			   fl->linger/HZ,
			   (long)(fl->expires - jiffies)/HZ,
			   &fl->dst,
			   fl->opt ? fl->opt->opt_nflen : 0);
	}
	return 0;
}

static const struct seq_operations ip6fl_seq_ops = {
	.start	=	ip6fl_seq_start,
	.next	=	ip6fl_seq_next,
	.stop	=	ip6fl_seq_stop,
	.show	=	ip6fl_seq_show,
};

static int __net_init ip6_flowlabel_proc_init(struct net *net)
{
	if (!proc_create_net("ip6_flowlabel", 0444, net->proc_net,
			&ip6fl_seq_ops, sizeof(struct ip6fl_iter_state)))
		return -ENOMEM;
	return 0;
}

static void __net_exit ip6_flowlabel_proc_fini(struct net *net)
{
	remove_proc_entry("ip6_flowlabel", net->proc_net);
}
#else
static inline int ip6_flowlabel_proc_init(struct net *net)
{
	return 0;
}
static inline void ip6_flowlabel_proc_fini(struct net *net)
{
}
#endif

static void __net_exit ip6_flowlabel_net_exit(struct net *net)
{
	ip6_fl_purge(net);
	ip6_flowlabel_proc_fini(net);
}

static struct pernet_operations ip6_flowlabel_net_ops = {
	.init = ip6_flowlabel_proc_init,
	.exit = ip6_flowlabel_net_exit,
};

int ip6_flowlabel_init(void)
{
	return register_pernet_subsys(&ip6_flowlabel_net_ops);
}

void ip6_flowlabel_cleanup(void)
{
	del_timer(&ip6_fl_gc_timer);
	unregister_pernet_subsys(&ip6_flowlabel_net_ops);
}