// SPDX-License-Identifier: GPL-2.0-only
/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 * 	Kazunori MIYAZAWA @USAGI
 * 	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 * 		IPv6 support
 * 	Kazunori MIYAZAWA @USAGI
 * 	YOSHIFUJI Hideaki
 * 		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/cpu.h>
#include <linux/audit.h>
#include <linux/rhashtable.h>
#include <linux/if_tunnel.h>
#include <linux/icmp.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/gre.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/mip6.h>
#endif
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif
#ifdef CONFIG_XFRM_ESPINTCP
#include <net/espintcp.h>
#endif

#include "xfrm_hash.h"

#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
#define XFRM_MAX_QUEUE_LEN	100

struct xfrm_flo {
	struct dst_entry *dst_orig;
	u8 flags;
};

/* prefixes smaller than this are stored in lists, not trees. */
#define INEXACT_PREFIXLEN_IPV4	16
#define INEXACT_PREFIXLEN_IPV6	48

struct xfrm_pol_inexact_node {
	struct rb_node node;
	union {
		xfrm_address_t addr;
		struct rcu_head rcu;
	};
	u8 prefixlen;

	struct rb_root root;

	/* the policies matching this node; may be an empty list */
	struct hlist_head hhead;
};

/* xfrm inexact policy search tree:
 * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
 *  |
 * +---- root_d: sorted by daddr:prefix
 * |                 |
 * |        xfrm_pol_inexact_node
 * |                 |
 * |                 +- root: sorted by saddr/prefix
 * |                 |              |
 * |                 |         xfrm_pol_inexact_node
 * |                 |              |
 * |                 |              + root: unused
 * |                 |              |
 * |                 |              + hhead: saddr:daddr policies
 * |                 |
 * |                 +- coarse policies and all any:daddr policies
 * |
 * +---- root_s: sorted by saddr:prefix
 * |                 |
 * |        xfrm_pol_inexact_node
 * |                 |
 * |                 + root: unused
 * |                 |
 * |                 + hhead: saddr:any policies
 * |
 * +---- coarse policies and all any:any policies
 *
 * Lookups return four candidate lists:
 * 1. any:any list from top-level xfrm_pol_inexact_bin
 * 2. any:daddr list from daddr tree
 * 3. saddr:daddr list from 2nd level daddr tree
 * 4. saddr:any list from saddr tree
 *
 * This result set then needs to be searched for the policy with
 * the lowest priority.  If two results have the same priority, the
 * youngest one wins.
 */
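
/* Editorial example (hypothetical addresses): an outbound lookup for
 * saddr 10.1.2.3, daddr 192.0.2.1 can return up to four lists:
 * the bin's own hhead (any:any), the root_d node covering 192.0.2.0/x
 * (any:daddr), that node's nested saddr subtree entry (saddr:daddr),
 * and the root_s node covering 10.1.0.0/x (saddr:any).  See
 * xfrm_policy_find_inexact_candidates() below.
 */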

struct xfrm_pol_inexact_key {
	possible_net_t net;
	u32 if_id;
	u16 family;
	u8 dir, type;
};

struct xfrm_pol_inexact_bin {
	struct xfrm_pol_inexact_key k;
	struct rhash_head head;
	/* list containing '*:*' policies */
	struct hlist_head hhead;

	seqcount_spinlock_t count;
	/* tree sorted by daddr/prefix */
	struct rb_root root_d;

	/* tree sorted by saddr/prefix */
	struct rb_root root_s;

	/* slow path below */
	struct list_head inexact_bins;
	struct rcu_head rcu;
};

enum xfrm_pol_inexact_candidate_type {
	XFRM_POL_CAND_BOTH,
	XFRM_POL_CAND_SADDR,
	XFRM_POL_CAND_DADDR,
	XFRM_POL_CAND_ANY,

	XFRM_POL_CAND_MAX,
};

struct xfrm_pol_inexact_candidates {
	struct hlist_head *res[XFRM_POL_CAND_MAX];
};

struct xfrm_flow_keys {
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_control control;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	} addrs;
	struct flow_dissector_key_ip ip;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_ports ports;
	struct flow_dissector_key_keyid gre;
};

static struct flow_dissector xfrm_session_dissector __ro_after_init;

static DEFINE_SPINLOCK(xfrm_if_cb_lock);
static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;

static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
						__read_mostly;

static struct kmem_cache *xfrm_dst_cache __ro_after_init;

static struct rhashtable xfrm_policy_inexact_table;
static const struct rhashtable_params xfrm_pol_inexact_params;

static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static void xfrm_policy_queue_process(struct timer_list *t);

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
			   u32 if_id);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup_rcu(struct net *net,
			       u8 type, u16 family, u8 dir, u32 if_id);
static struct xfrm_policy *
xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
			bool excl);
static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy);

static bool
xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
				    struct xfrm_pol_inexact_bin *b,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr);

static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
{
	return refcount_inc_not_zero(&policy->refcnt);
}

static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}
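
/* Editorial note: the port tests above use the XOR-and-mask idiom:
 * ((a ^ b) & mask) == 0 exactly when a and b agree on every bit set
 * in mask.  A zero dport_mask therefore acts as a wildcard, while a
 * mask of 0xffff degenerates to an exact port comparison.
 */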

static inline bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}

bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
			 unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return false;
}

static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	const struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
		return NULL;
	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}
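
/* Editorial note: xfrm_policy_get_afinfo() is deliberately asymmetric:
 * on success it returns with rcu_read_lock() still held and the caller
 * must drop it (as __xfrm_dst_lookup() does below); on failure the
 * lock has already been released and NULL is returned.
 */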

/* Called with rcu_read_lock(). */
static const struct xfrm_if_cb *xfrm_if_get_cb(void)
{
	return rcu_dereference(xfrm_if_cb);
}

struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr,
				    int family, u32 mark)
{
	const struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);

	rcu_read_unlock();

	return dst;
}
EXPORT_SYMBOL(__xfrm_dst_lookup);

static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
						int tos, int oif,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family, u32 mark)
{
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		saddr = x->coaddr;
		daddr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		saddr = prev_saddr;
		daddr = x->coaddr;
	}

	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr,  sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr,  sizeof(*prev_daddr));
	}

	return dst;
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}

static void xfrm_policy_timer(struct timer_list *t)
{
	struct xfrm_policy *xp = from_timer(xp, t, timer);
	time64_t now = ktime_get_real_seconds();
	time64_t next = TIME64_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (unlikely(xp->walk.dead))
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		time64_t tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		time64_t tmo = xp->lft.hard_use_expires_seconds +
			(READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		time64_t tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		time64_t tmo = xp->lft.soft_use_expires_seconds +
			(READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != TIME64_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}
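
/* Editorial note on the refcounting above: mod_timer() returns 0 when
 * the timer was not already pending, i.e. re-arming created a new
 * pending timer, so an extra policy reference is taken on its behalf;
 * the matching xfrm_pol_put() runs when the timer fires or is deleted.
 */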

/* Allocate an xfrm_policy.  Not used here; it is intended for use by the
 * pfkeyv2 SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst_inexact_list);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		refcount_set(&policy->refcnt, 1);
		skb_queue_head_init(&policy->polq.hold_queue);
		timer_setup(&policy->timer, xfrm_policy_timer, 0);
		timer_setup(&policy->polq.hold_timer,
			    xfrm_policy_queue_process, 0);
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);
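
/* Editorial sketch of the expected calling pattern (hypothetical,
 * error handling trimmed); the pfkeyv2 and netlink front ends do
 * roughly:
 *
 *	pol = xfrm_policy_alloc(net, GFP_KERNEL);
 *	if (!pol)
 *		return -ENOMEM;
 *	// fill pol->selector, pol->lft, pol->action, ...
 *	err = xfrm_policy_insert(XFRM_POLICY_OUT, pol, 0);
 */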

static void xfrm_policy_destroy_rcu(struct rcu_head *head)
{
	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}

/* Destroy xfrm_policy: descendant resources must have been released by this point. */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);

	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
		BUG();

	xfrm_dev_policy_free(policy);
	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
}
EXPORT_SYMBOL(xfrm_policy_destroy);

/* Rule must be locked.  Release descendant resources, announce the
 * entry dead.  The rule must already be unlinked from all lists by this
 * point.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	xfrm_dev_policy_delete(policy);

	write_lock_bh(&policy->lock);
	policy->walk.dead = 1;
	write_unlock_bh(&policy->lock);

	atomic_inc(&policy->genid);

	if (del_timer(&policy->polq.hold_timer))
		xfrm_pol_put(policy);
	skb_queue_purge(&policy->polq.hold_queue);

	if (del_timer(&policy->timer))
		xfrm_pol_put(policy);

	xfrm_pol_put(policy);
}

static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}

/* calculate policy hash thresholds */
static void __get_hash_thresh(struct net *net,
			      unsigned short family, int dir,
			      u8 *dbits, u8 *sbits)
{
	switch (family) {
	case AF_INET:
		*dbits = net->xfrm.policy_bydst[dir].dbits4;
		*sbits = net->xfrm.policy_bydst[dir].sbits4;
		break;

	case AF_INET6:
		*dbits = net->xfrm.policy_bydst[dir].dbits6;
		*sbits = net->xfrm.policy_bydst[dir].sbits6;
		break;

	default:
		*dbits = 0;
		*sbits = 0;
	}
}

static struct hlist_head *policy_hash_bysel(struct net *net,
					    const struct xfrm_selector *sel,
					    unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __sel_hash(sel, family, hmask, dbits, sbits);

	if (hash == hmask + 1)
		return NULL;

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}

static struct hlist_head *policy_hash_direct(struct net *net,
					     const xfrm_address_t *daddr,
					     const xfrm_address_t *saddr,
					     unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}

static void xfrm_dst_hash_transfer(struct net *net,
				   struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask,
				   int dir)
{
	struct hlist_node *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;
	u8 dbits;
	u8 sbits;

redo:
	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
		unsigned int h;

		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask, dbits, sbits);
		if (!entry0 || pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
			hlist_del_rcu(&pol->bydst);
			hlist_add_head_rcu(&pol->bydst, ndsttable + h);
			h0 = h;
		} else {
			if (h != h0)
				continue;
			hlist_del_rcu(&pol->bydst);
			hlist_add_behind_rcu(&pol->bydst, entry0);
		}
		entry0 = &pol->bydst;
	}
	if (!hlist_empty(list)) {
		entry0 = NULL;
		goto redo;
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}
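
/* Editorial note: the tables always grow by doubling; e.g. an old
 * hmask of 15 (16 buckets) yields ((15 + 1) << 1) - 1 = 31, i.e.
 * 32 buckets.
 */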

static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	struct hlist_head *odst;
	int i;

	if (!ndst)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);

	odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
				lockdep_is_held(&net->xfrm.xfrm_policy_lock));

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);

	rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	synchronize_rcu();

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}
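
/* Editorial note: the resize above is the classic RCU replacement
 * pattern: fill the new table, publish it with rcu_assign_pointer(),
 * wait out all readers of the old table with synchronize_rcu(), and
 * only then free the old bucket array.
 */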

static void xfrm_byidx_resize(struct net *net)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = net->xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
}
EXPORT_SYMBOL(xfrm_spd_getinfo);

static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	}
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net);

	mutex_unlock(&hash_resize_mutex);
}

/* Make sure *pol can be inserted into fastbin.
 * Useful to check that later insert requests will be successful
 * (provided xfrm_policy_lock is held throughout).
 */
static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
{
	struct xfrm_pol_inexact_bin *bin, *prev;
	struct xfrm_pol_inexact_key k = {
		.family = pol->family,
		.type = pol->type,
		.dir = dir,
		.if_id = pol->if_id,
	};
	struct net *net = xp_net(pol);

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	write_pnet(&k.net, net);
	bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
				     xfrm_pol_inexact_params);
	if (bin)
		return bin;

	bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
	if (!bin)
		return NULL;

	bin->k = k;
	INIT_HLIST_HEAD(&bin->hhead);
	bin->root_d = RB_ROOT;
	bin->root_s = RB_ROOT;
	seqcount_spinlock_init(&bin->count, &net->xfrm.xfrm_policy_lock);

	prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
						&bin->k, &bin->head,
						xfrm_pol_inexact_params);
	if (!prev) {
		list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
		return bin;
	}

	kfree(bin);

	return IS_ERR(prev) ? NULL : prev;
}

static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
					       int family, u8 prefixlen)
{
	if (xfrm_addr_any(addr, family))
		return true;

	if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
		return true;

	if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
		return true;

	return false;
}
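
/* Editorial example: with INEXACT_PREFIXLEN_IPV4 == 16, a selector on
 * 10.20.0.0/24 is stored in the prefix trees, while 10.0.0.0/8 (prefix
 * shorter than 16 bits) stays on the plain "any" list, as does an
 * all-zero wildcard address.
 */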

static bool
xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
{
	const xfrm_address_t *addr;
	bool saddr_any, daddr_any;
	u8 prefixlen;

	addr = &policy->selector.saddr;
	prefixlen = policy->selector.prefixlen_s;

	saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
						       policy->family,
						       prefixlen);
	addr = &policy->selector.daddr;
	prefixlen = policy->selector.prefixlen_d;
	daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
						       policy->family,
						       prefixlen);
	return saddr_any && daddr_any;
}

static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
				       const xfrm_address_t *addr, u8 prefixlen)
{
	node->addr = *addr;
	node->prefixlen = prefixlen;
}

static struct xfrm_pol_inexact_node *
xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
{
	struct xfrm_pol_inexact_node *node;

	node = kzalloc(sizeof(*node), GFP_ATOMIC);
	if (node)
		xfrm_pol_inexact_node_init(node, addr, prefixlen);

	return node;
}

static int xfrm_policy_addr_delta(const xfrm_address_t *a,
				  const xfrm_address_t *b,
				  u8 prefixlen, u16 family)
{
	u32 ma, mb, mask;
	unsigned int pdw, pbi;
	int delta = 0;

	switch (family) {
	case AF_INET:
		if (prefixlen == 0)
			return 0;
		mask = ~0U << (32 - prefixlen);
		ma = ntohl(a->a4) & mask;
		mb = ntohl(b->a4) & mask;
		if (ma < mb)
			delta = -1;
		else if (ma > mb)
			delta = 1;
		break;
	case AF_INET6:
		pdw = prefixlen >> 5;
		pbi = prefixlen & 0x1f;

		if (pdw) {
			delta = memcmp(a->a6, b->a6, pdw << 2);
			if (delta)
				return delta;
		}
		if (pbi) {
			mask = ~0U << (32 - pbi);
			ma = ntohl(a->a6[pdw]) & mask;
			mb = ntohl(b->a6[pdw]) & mask;
			if (ma < mb)
				delta = -1;
			else if (ma > mb)
				delta = 1;
		}
		break;
	default:
		break;
	}

	return delta;
}
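
/* Editorial example: for AF_INET with prefixlen 16, addresses
 * 10.1.200.3 and 10.1.7.9 compare equal (both mask to 10.1.0.0, so 0
 * is returned), whereas comparing 10.1.0.0/16 against 10.2.0.0/16
 * returns -1 because the masked first address is numerically lower.
 */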

static void xfrm_policy_inexact_list_reinsert(struct net *net,
					      struct xfrm_pol_inexact_node *n,
					      u16 family)
{
	unsigned int matched_s, matched_d;
	struct xfrm_policy *policy, *p;

	matched_s = 0;
	matched_d = 0;

	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		struct hlist_node *newpos = NULL;
		bool matches_s, matches_d;

		if (policy->walk.dead || !policy->bydst_reinsert)
			continue;

		WARN_ON_ONCE(policy->family != family);

		policy->bydst_reinsert = false;
		hlist_for_each_entry(p, &n->hhead, bydst) {
			if (policy->priority > p->priority)
				newpos = &p->bydst;
			else if (policy->priority == p->priority &&
				 policy->pos > p->pos)
				newpos = &p->bydst;
			else
				break;
		}

		if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
			hlist_add_behind_rcu(&policy->bydst, newpos);
		else
			hlist_add_head_rcu(&policy->bydst, &n->hhead);

		/* paranoia checks follow.
		 * Check that the reinserted policy matches at least
		 * saddr or daddr for current node prefix.
		 *
		 * Matching both is fine, matching saddr in one policy
		 * (but not daddr) and then matching only daddr in another
		 * is a bug.
		 */
		matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
						   &n->addr,
						   n->prefixlen,
						   family) == 0;
		matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
						   &n->addr,
						   n->prefixlen,
						   family) == 0;
		if (matches_s && matches_d)
			continue;

		WARN_ON_ONCE(!matches_s && !matches_d);
		if (matches_s)
			matched_s++;
		if (matches_d)
			matched_d++;
		WARN_ON_ONCE(matched_s && matched_d);
	}
}

static void xfrm_policy_inexact_node_reinsert(struct net *net,
					      struct xfrm_pol_inexact_node *n,
					      struct rb_root *new,
					      u16 family)
{
	struct xfrm_pol_inexact_node *node;
	struct rb_node **p, *parent;

	/* we should not have another subtree here */
	WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
restart:
	parent = NULL;
	p = &new->rb_node;
	while (*p) {
		u8 prefixlen;
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		prefixlen = min(node->prefixlen, n->prefixlen);

		delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
					       prefixlen, family);
		if (delta < 0) {
			p = &parent->rb_left;
		} else if (delta > 0) {
			p = &parent->rb_right;
		} else {
			bool same_prefixlen = node->prefixlen == n->prefixlen;
			struct xfrm_policy *tmp;

			hlist_for_each_entry(tmp, &n->hhead, bydst) {
				tmp->bydst_reinsert = true;
				hlist_del_rcu(&tmp->bydst);
			}

			node->prefixlen = prefixlen;

			xfrm_policy_inexact_list_reinsert(net, node, family);

			if (same_prefixlen) {
				kfree_rcu(n, rcu);
				return;
			}

			rb_erase(*p, new);
			kfree_rcu(n, rcu);
			n = node;
			goto restart;
		}
	}

	rb_link_node_rcu(&n->node, parent, p);
	rb_insert_color(&n->node, new);
}

/* merge nodes v and n */
static void xfrm_policy_inexact_node_merge(struct net *net,
					   struct xfrm_pol_inexact_node *v,
					   struct xfrm_pol_inexact_node *n,
					   u16 family)
{
	struct xfrm_pol_inexact_node *node;
	struct xfrm_policy *tmp;
	struct rb_node *rnode;

	/* To-be-merged node v has a subtree.
	 *
	 * Dismantle it and insert its nodes to n->root.
	 */
	while ((rnode = rb_first(&v->root)) != NULL) {
		node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
		rb_erase(&node->node, &v->root);
		xfrm_policy_inexact_node_reinsert(net, node, &n->root,
						  family);
	}

	hlist_for_each_entry(tmp, &v->hhead, bydst) {
		tmp->bydst_reinsert = true;
		hlist_del_rcu(&tmp->bydst);
	}

	xfrm_policy_inexact_list_reinsert(net, n, family);
}

static struct xfrm_pol_inexact_node *
xfrm_policy_inexact_insert_node(struct net *net,
				struct rb_root *root,
				xfrm_address_t *addr,
				u16 family, u8 prefixlen, u8 dir)
{
	struct xfrm_pol_inexact_node *cached = NULL;
	struct rb_node **p, *parent = NULL;
	struct xfrm_pol_inexact_node *node;

	p = &root->rb_node;
	while (*p) {
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		delta = xfrm_policy_addr_delta(addr, &node->addr,
					       node->prefixlen,
					       family);
		if (delta == 0 && prefixlen >= node->prefixlen) {
			WARN_ON_ONCE(cached); /* ipsec policies got lost */
			return node;
		}

		if (delta < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;

		if (prefixlen < node->prefixlen) {
			delta = xfrm_policy_addr_delta(addr, &node->addr,
						       prefixlen,
						       family);
			if (delta)
				continue;

			/* This node is a subnet of the new prefix. It needs
			 * to be removed and re-inserted with the smaller
			 * prefix and all nodes that are now also covered
			 * by the reduced prefixlen.
			 */
			rb_erase(&node->node, root);

			if (!cached) {
				xfrm_pol_inexact_node_init(node, addr,
							   prefixlen);
				cached = node;
			} else {
				/* This node also falls within the new
				 * prefixlen. Merge the to-be-reinserted
				 * node and this one.
				 */
				xfrm_policy_inexact_node_merge(net, node,
							       cached, family);
				kfree_rcu(node, rcu);
			}

			/* restart */
			p = &root->rb_node;
			parent = NULL;
		}
	}

	node = cached;
	if (!node) {
		node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
		if (!node)
			return NULL;
	}

	rb_link_node_rcu(&node->node, parent, p);
	rb_insert_color(&node->node, root);

	return node;
}

static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
{
	struct xfrm_pol_inexact_node *node;
	struct rb_node *rn = rb_first(r);

	while (rn) {
		node = rb_entry(rn, struct xfrm_pol_inexact_node, node);

		xfrm_policy_inexact_gc_tree(&node->root, rm);
		rn = rb_next(rn);

		if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
			WARN_ON_ONCE(rm);
			continue;
		}

		rb_erase(&node->node, r);
		kfree_rcu(node, rcu);
	}
}

static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
{
	write_seqcount_begin(&b->count);
	xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
	xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
	write_seqcount_end(&b->count);

	if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
	    !hlist_empty(&b->hhead)) {
		WARN_ON_ONCE(net_exit);
		return;
	}

	if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
				   xfrm_pol_inexact_params) == 0) {
		list_del(&b->inexact_bins);
		kfree_rcu(b, rcu);
	}
}

static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
{
	struct net *net = read_pnet(&b->k.net);

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	__xfrm_policy_inexact_prune_bin(b, false);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}

static void __xfrm_policy_inexact_flush(struct net *net)
{
	struct xfrm_pol_inexact_bin *bin, *t;

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
		__xfrm_policy_inexact_prune_bin(bin, false);
}

static struct hlist_head *
xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
				struct xfrm_policy *policy, u8 dir)
{
	struct xfrm_pol_inexact_node *n;
	struct net *net;

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	if (xfrm_policy_inexact_insert_use_any_list(policy))
		return &bin->hhead;

	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
					       policy->family,
					       policy->selector.prefixlen_d)) {
		write_seqcount_begin(&bin->count);
		n = xfrm_policy_inexact_insert_node(net,
						    &bin->root_s,
						    &policy->selector.saddr,
						    policy->family,
						    policy->selector.prefixlen_s,
						    dir);
		write_seqcount_end(&bin->count);
		if (!n)
			return NULL;

		return &n->hhead;
	}

	/* daddr is fixed */
	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &bin->root_d,
					    &policy->selector.daddr,
					    policy->family,
					    policy->selector.prefixlen_d, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	/* saddr is wildcard */
	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
					       policy->family,
					       policy->selector.prefixlen_s))
		return &n->hhead;

	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &n->root,
					    &policy->selector.saddr,
					    policy->family,
					    policy->selector.prefixlen_s, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	return &n->hhead;
}

static struct xfrm_policy *
xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
{
	struct xfrm_pol_inexact_bin *bin;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct net *net;

	bin = xfrm_policy_inexact_alloc_bin(policy, dir);
	if (!bin)
		return ERR_PTR(-ENOMEM);

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
	if (!chain) {
		__xfrm_policy_inexact_prune_bin(bin, false);
		return ERR_PTR(-ENOMEM);
	}

	delpol = xfrm_policy_insert_list(chain, policy, excl);
	if (delpol && excl) {
		__xfrm_policy_inexact_prune_bin(bin, false);
		return ERR_PTR(-EEXIST);
	}

	chain = &net->xfrm.policy_inexact[dir];
	xfrm_policy_insert_inexact_list(chain, policy);

	if (delpol)
		__xfrm_policy_inexact_prune_bin(bin, false);

	return delpol;
}

static void xfrm_hash_rebuild(struct work_struct *work)
{
	struct net *net = container_of(work, struct net,
				       xfrm.policy_hthresh.work);
	unsigned int hmask;
	struct xfrm_policy *pol;
	struct xfrm_policy *policy;
	struct hlist_head *chain;
	struct hlist_head *odst;
	struct hlist_node *newpos;
	int i;
	int dir;
	unsigned seq;
	u8 lbits4, rbits4, lbits6, rbits6;

	mutex_lock(&hash_resize_mutex);

	/* read selector prefixlen thresholds */
	do {
		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);

		lbits4 = net->xfrm.policy_hthresh.lbits4;
		rbits4 = net->xfrm.policy_hthresh.rbits4;
		lbits6 = net->xfrm.policy_hthresh.lbits6;
		rbits6 = net->xfrm.policy_hthresh.rbits6;
	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);

	/* make sure that we can insert the indirect policies again before
	 * we start with destructive action.
	 */
	list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
		struct xfrm_pol_inexact_bin *bin;
		u8 dbits, sbits;

		if (policy->walk.dead)
			continue;

		dir = xfrm_policy_id2dir(policy->index);
		if (dir >= XFRM_POLICY_MAX)
			continue;

		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			if (policy->family == AF_INET) {
				dbits = rbits4;
				sbits = lbits4;
			} else {
				dbits = rbits6;
				sbits = lbits6;
			}
		} else {
			if (policy->family == AF_INET) {
				dbits = lbits4;
				sbits = rbits4;
			} else {
				dbits = lbits6;
				sbits = rbits6;
			}
		}

		if (policy->selector.prefixlen_d < dbits ||
		    policy->selector.prefixlen_s < sbits)
			continue;

		bin = xfrm_policy_inexact_alloc_bin(policy, dir);
		if (!bin)
			goto out_unlock;

		if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
			goto out_unlock;
	}

	/* reset the bydst and inexact table in all directions */
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct hlist_node *n;

		hlist_for_each_entry_safe(policy, n,
					  &net->xfrm.policy_inexact[dir],
					  bydst_inexact_list) {
			hlist_del_rcu(&policy->bydst);
			hlist_del_init(&policy->bydst_inexact_list);
		}

		hmask = net->xfrm.policy_bydst[dir].hmask;
		odst = net->xfrm.policy_bydst[dir].table;
		for (i = hmask; i >= 0; i--) {
			hlist_for_each_entry_safe(policy, n, odst + i, bydst)
				hlist_del_rcu(&policy->bydst);
		}
		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			/* dir out => dst = remote, src = local */
			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
		} else {
			/* dir in/fwd => dst = local, src = remote */
			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
		}
	}

	/* re-insert all policies by order of creation */
	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		if (policy->walk.dead)
			continue;
		dir = xfrm_policy_id2dir(policy->index);
		if (dir >= XFRM_POLICY_MAX) {
			/* skip socket policies */
			continue;
		}
		newpos = NULL;
		chain = policy_hash_bysel(net, &policy->selector,
					  policy->family, dir);

		if (!chain) {
			void *p = xfrm_policy_inexact_insert(policy, dir, 0);

			WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
			continue;
		}

		hlist_for_each_entry(pol, chain, bydst) {
			if (policy->priority >= pol->priority)
				newpos = &pol->bydst;
			else
				break;
		}
		if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
			hlist_add_behind_rcu(&policy->bydst, newpos);
		else
			hlist_add_head_rcu(&policy->bydst, chain);
	}

out_unlock:
	__xfrm_policy_inexact_flush(net);
	write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	mutex_unlock(&hash_resize_mutex);
}

void xfrm_policy_hash_rebuild(struct net *net)
{
	schedule_work(&net->xfrm.policy_hthresh.work);
}
EXPORT_SYMBOL(xfrm_policy_hash_rebuild);

/* Generate a new index... KAME seems to generate them ordered by cost
 * of an absolute unpredictability of rule ordering.  This will not pass. */
static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
{
	for (;;) {
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		if (!index) {
			idx = (net->xfrm.idx_generator | dir);
			net->xfrm.idx_generator += 8;
		} else {
			idx = index;
			index = 0;
		}

		if (idx == 0)
			idx = 8;
		list = net->xfrm.policy_byidx + idx_hash(net, idx);
		found = 0;
		hlist_for_each_entry(p, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}
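
/* Editorial note: indices advance in steps of 8 with the direction
 * OR-ed into the low bits, so xfrm_policy_id2dir() can recover the
 * direction as index & 7; e.g. dir == XFRM_POLICY_OUT (1) yields the
 * sequence 1, 9, 17, ...
 */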

static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}

static void xfrm_policy_requeue(struct xfrm_policy *old,
				struct xfrm_policy *new)
{
	struct xfrm_policy_queue *pq = &old->polq;
	struct sk_buff_head list;

	if (skb_queue_empty(&pq->hold_queue))
		return;

	__skb_queue_head_init(&list);

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice_init(&pq->hold_queue, &list);
	if (del_timer(&pq->hold_timer))
		xfrm_pol_put(old);
	spin_unlock_bh(&pq->hold_queue.lock);

	pq = &new->polq;

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice(&list, &pq->hold_queue);
	pq->timeout = XFRM_QUEUE_TMO_MIN;
	if (!mod_timer(&pq->hold_timer, jiffies))
		xfrm_pol_hold(new);
	spin_unlock_bh(&pq->hold_queue.lock);
}

static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark,
					  struct xfrm_policy *pol)
{
	return mark->v == pol->mark.v && mark->m == pol->mark.m;
}

static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
{
	const struct xfrm_pol_inexact_key *k = data;
	u32 a = k->type << 24 | k->dir << 16 | k->family;

	return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
			    seed);
}
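
/* Editorial note: the first word hashed above packs three small fields
 * into 32 bits; e.g. type 0, dir 1, family AF_INET (2) packs to
 * 0x00010002, which is then mixed with if_id and the per-netns salt
 * from net_hash_mix().
 */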

static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
{
	const struct xfrm_pol_inexact_bin *b = data;

	return xfrm_pol_bin_key(&b->k, 0, seed);
}

static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
			    const void *ptr)
{
	const struct xfrm_pol_inexact_key *key = arg->key;
	const struct xfrm_pol_inexact_bin *b = ptr;
	int ret;

	if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
		return -1;

	ret = b->k.dir ^ key->dir;
	if (ret)
		return ret;

	ret = b->k.type ^ key->type;
	if (ret)
		return ret;

	ret = b->k.family ^ key->family;
	if (ret)
		return ret;

	return b->k.if_id ^ key->if_id;
}

static const struct rhashtable_params xfrm_pol_inexact_params = {
	.head_offset		= offsetof(struct xfrm_pol_inexact_bin, head),
	.hashfn			= xfrm_pol_bin_key,
	.obj_hashfn		= xfrm_pol_bin_obj,
	.obj_cmpfn		= xfrm_pol_bin_cmp,
	.automatic_shrinking	= true,
};

static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy)
{
	struct xfrm_policy *pol, *delpol = NULL;
	struct hlist_node *newpos = NULL;
	int i = 0;

	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		if (pol->type == policy->type &&
		    pol->if_id == policy->if_id &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(&policy->mark, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst_inexact_list;
			continue;
		}
		if (delpol)
			break;
	}

	if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
		hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
	else
		hlist_add_head_rcu(&policy->bydst_inexact_list, chain);

	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		pol->pos = i;
		i++;
	}
}

static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
						   struct xfrm_policy *policy,
						   bool excl)
{
	struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;

	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == policy->type &&
		    pol->if_id == policy->if_id &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(&policy->mark, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl)
				return ERR_PTR(-EEXIST);
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = pol;
			continue;
		}
		if (delpol)
			break;
	}

	if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
		hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
	else
		/* Packet offload policies are inserted at the head
		 * to speed up lookups.
		 */
		hlist_add_head_rcu(&policy->bydst, chain);

	return delpol;
}

int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct net *net = xp_net(policy);
	struct xfrm_policy *delpol;
	struct hlist_head *chain;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
	if (chain)
		delpol = xfrm_policy_insert_list(chain, policy, excl);
	else
		delpol = xfrm_policy_inexact_insert(policy, dir, excl);

	if (IS_ERR(delpol)) {
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
		return PTR_ERR(delpol);
	}

	__xfrm_policy_link(policy, dir);

	/* After previous checking, family can either be AF_INET or AF_INET6 */
	if (policy->family == AF_INET)
		rt_genid_bump_ipv4(net);
	else
		rt_genid_bump_ipv6(net);

	if (delpol) {
		xfrm_policy_requeue(delpol, policy);
		__xfrm_policy_unlink(delpol, dir);
	}
	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
	policy->curlft.add_time = ktime_get_real_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);
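
/* Editorial note: when an equivalent policy already exists and excl is
 * not set, the old entry ("delpol") is unlinked and killed, but
 * xfrm_policy_requeue() first migrates any packets parked on its hold
 * queue to the new policy, so traffic waiting on SA acquisition is not
 * dropped across an update.
 */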
1641 
1642 static struct xfrm_policy *
1643 __xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark,
1644 			u32 if_id, u8 type, int dir, struct xfrm_selector *sel,
1645 			struct xfrm_sec_ctx *ctx)
1646 {
1647 	struct xfrm_policy *pol;
1648 
1649 	if (!chain)
1650 		return NULL;
1651 
1652 	hlist_for_each_entry(pol, chain, bydst) {
1653 		if (pol->type == type &&
1654 		    pol->if_id == if_id &&
1655 		    xfrm_policy_mark_match(mark, pol) &&
1656 		    !selector_cmp(sel, &pol->selector) &&
1657 		    xfrm_sec_ctx_match(ctx, pol->security))
1658 			return pol;
1659 	}
1660 
1661 	return NULL;
1662 }
1663 
1664 struct xfrm_policy *
1665 xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id,
1666 		      u8 type, int dir, struct xfrm_selector *sel,
1667 		      struct xfrm_sec_ctx *ctx, int delete, int *err)
1668 {
1669 	struct xfrm_pol_inexact_bin *bin = NULL;
1670 	struct xfrm_policy *pol, *ret = NULL;
1671 	struct hlist_head *chain;
1672 
1673 	*err = 0;
1674 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1675 	chain = policy_hash_bysel(net, sel, sel->family, dir);
1676 	if (!chain) {
1677 		struct xfrm_pol_inexact_candidates cand;
1678 		int i;
1679 
1680 		bin = xfrm_policy_inexact_lookup(net, type,
1681 						 sel->family, dir, if_id);
1682 		if (!bin) {
1683 			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1684 			return NULL;
1685 		}
1686 
1687 		if (!xfrm_policy_find_inexact_candidates(&cand, bin,
1688 							 &sel->saddr,
1689 							 &sel->daddr)) {
1690 			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1691 			return NULL;
1692 		}
1693 
1694 		pol = NULL;
1695 		for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
1696 			struct xfrm_policy *tmp;
1697 
1698 			tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
1699 						      if_id, type, dir,
1700 						      sel, ctx);
1701 			if (!tmp)
1702 				continue;
1703 
1704 			if (!pol || tmp->pos < pol->pos)
1705 				pol = tmp;
1706 		}
1707 	} else {
1708 		pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
1709 					      sel, ctx);
1710 	}
1711 
1712 	if (pol) {
1713 		xfrm_pol_hold(pol);
1714 		if (delete) {
1715 			*err = security_xfrm_policy_delete(pol->security);
1716 			if (*err) {
1717 				spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1718 				return pol;
1719 			}
1720 			__xfrm_policy_unlink(pol, dir);
1721 		}
1722 		ret = pol;
1723 	}
1724 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1725 
1726 	if (ret && delete)
1727 		xfrm_policy_kill(ret);
1728 	if (bin && delete)
1729 		xfrm_policy_inexact_prune_bin(bin);
1730 	return ret;
1731 }
1732 EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
1733 
1734 struct xfrm_policy *
1735 xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id,
1736 		 u8 type, int dir, u32 id, int delete, int *err)
1737 {
1738 	struct xfrm_policy *pol, *ret;
1739 	struct hlist_head *chain;
1740 
1741 	*err = -ENOENT;
1742 	if (xfrm_policy_id2dir(id) != dir)
1743 		return NULL;
1744 
1745 	*err = 0;
1746 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1747 	chain = net->xfrm.policy_byidx + idx_hash(net, id);
1748 	ret = NULL;
1749 	hlist_for_each_entry(pol, chain, byidx) {
1750 		if (pol->type == type && pol->index == id &&
1751 		    pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) {
1752 			xfrm_pol_hold(pol);
1753 			if (delete) {
1754 				*err = security_xfrm_policy_delete(
1755 								pol->security);
1756 				if (*err) {
1757 					spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1758 					return pol;
1759 				}
1760 				__xfrm_policy_unlink(pol, dir);
1761 			}
1762 			ret = pol;
1763 			break;
1764 		}
1765 	}
1766 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1767 
1768 	if (ret && delete)
1769 		xfrm_policy_kill(ret);
1770 	return ret;
1771 }
1772 EXPORT_SYMBOL(xfrm_policy_byid);
1773 
1774 #ifdef CONFIG_SECURITY_NETWORK_XFRM
1775 static inline int
1776 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1777 {
1778 	struct xfrm_policy *pol;
1779 	int err = 0;
1780 
1781 	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1782 		if (pol->walk.dead ||
1783 		    xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
1784 		    pol->type != type)
1785 			continue;
1786 
1787 		err = security_xfrm_policy_delete(pol->security);
1788 		if (err) {
1789 			xfrm_audit_policy_delete(pol, 0, task_valid);
1790 			return err;
1791 		}
1792 	}
1793 	return err;
1794 }
1795 
1796 static inline int xfrm_dev_policy_flush_secctx_check(struct net *net,
1797 						     struct net_device *dev,
1798 						     bool task_valid)
1799 {
1800 	struct xfrm_policy *pol;
1801 	int err = 0;
1802 
1803 	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1804 		if (pol->walk.dead ||
1805 		    xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
1806 		    pol->xdo.dev != dev)
1807 			continue;
1808 
1809 		err = security_xfrm_policy_delete(pol->security);
1810 		if (err) {
1811 			xfrm_audit_policy_delete(pol, 0, task_valid);
1812 			return err;
1813 		}
1814 	}
1815 	return err;
1816 }
1817 #else
1818 static inline int
1819 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1820 {
1821 	return 0;
1822 }
1823 
1824 static inline int xfrm_dev_policy_flush_secctx_check(struct net *net,
1825 						     struct net_device *dev,
1826 						     bool task_valid)
1827 {
1828 	return 0;
1829 }
1830 #endif
1831 
1832 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
1833 {
1834 	int dir, err = 0, cnt = 0;
1835 	struct xfrm_policy *pol;
1836 
1837 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1838 
1839 	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
1840 	if (err)
1841 		goto out;
1842 
1843 again:
1844 	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1845 		if (pol->walk.dead)
1846 			continue;
1847 
1848 		dir = xfrm_policy_id2dir(pol->index);
1849 		if (dir >= XFRM_POLICY_MAX ||
1850 		    pol->type != type)
1851 			continue;
1852 
1853 		__xfrm_policy_unlink(pol, dir);
1854 		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1855 		cnt++;
1856 		xfrm_audit_policy_delete(pol, 1, task_valid);
1857 		xfrm_policy_kill(pol);
1858 		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1859 		goto again;
1860 	}
1861 	if (cnt)
1862 		__xfrm_policy_inexact_flush(net);
1863 	else
1864 		err = -ESRCH;
1865 out:
1866 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1867 	return err;
1868 }
1869 EXPORT_SYMBOL(xfrm_policy_flush);
1870 
1871 int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
1872 			  bool task_valid)
1873 {
1874 	int dir, err = 0, cnt = 0;
1875 	struct xfrm_policy *pol;
1876 
1877 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1878 
1879 	err = xfrm_dev_policy_flush_secctx_check(net, dev, task_valid);
1880 	if (err)
1881 		goto out;
1882 
1883 again:
1884 	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1885 		if (pol->walk.dead)
1886 			continue;
1887 
1888 		dir = xfrm_policy_id2dir(pol->index);
1889 		if (dir >= XFRM_POLICY_MAX ||
1890 		    pol->xdo.dev != dev)
1891 			continue;
1892 
1893 		__xfrm_policy_unlink(pol, dir);
1894 		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1895 		cnt++;
1896 		xfrm_audit_policy_delete(pol, 1, task_valid);
1897 		xfrm_policy_kill(pol);
1898 		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1899 		goto again;
1900 	}
1901 	if (cnt)
1902 		__xfrm_policy_inexact_flush(net);
1903 	else
1904 		err = -ESRCH;
1905 out:
1906 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1907 	return err;
1908 }
1909 EXPORT_SYMBOL(xfrm_dev_policy_flush);
1910 
1911 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1912 		     int (*func)(struct xfrm_policy *, int, int, void*),
1913 		     void *data)
1914 {
1915 	struct xfrm_policy *pol;
1916 	struct xfrm_policy_walk_entry *x;
1917 	int error = 0;
1918 
1919 	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
1920 	    walk->type != XFRM_POLICY_TYPE_ANY)
1921 		return -EINVAL;
1922 
1923 	if (list_empty(&walk->walk.all) && walk->seq != 0)
1924 		return 0;
1925 
1926 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1927 	if (list_empty(&walk->walk.all))
1928 		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
1929 	else
1930 		x = list_first_entry(&walk->walk.all,
1931 				     struct xfrm_policy_walk_entry, all);
1932 
1933 	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
1934 		if (x->dead)
1935 			continue;
1936 		pol = container_of(x, struct xfrm_policy, walk);
1937 		if (walk->type != XFRM_POLICY_TYPE_ANY &&
1938 		    walk->type != pol->type)
1939 			continue;
1940 		error = func(pol, xfrm_policy_id2dir(pol->index),
1941 			     walk->seq, data);
1942 		if (error) {
1943 			list_move_tail(&walk->walk.all, &x->all);
1944 			goto out;
1945 		}
1946 		walk->seq++;
1947 	}
1948 	if (walk->seq == 0) {
1949 		error = -ENOENT;
1950 		goto out;
1951 	}
1952 	list_del_init(&walk->walk.all);
1953 out:
1954 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1955 	return error;
1956 }
1957 EXPORT_SYMBOL(xfrm_policy_walk);
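
/* Editorial note (not part of the original source): the walker above is
 * resumable. walk->walk is a dead dummy entry; when func() stops the walk
 * by returning nonzero, the dummy is spliced into net->xfrm.policy_all at
 * the current position, so the next call picks up exactly where this one
 * left off. xfrm_policy_walk_done() unlinks the dummy again.
 */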
1958 
1959 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
1960 {
1961 	INIT_LIST_HEAD(&walk->walk.all);
1962 	walk->walk.dead = 1;
1963 	walk->type = type;
1964 	walk->seq = 0;
1965 }
1966 EXPORT_SYMBOL(xfrm_policy_walk_init);
1967 
1968 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
1969 {
1970 	if (list_empty(&walk->walk.all))
1971 		return;
1972 
1973 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1974 	list_del(&walk->walk.all);
1975 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1976 }
1977 EXPORT_SYMBOL(xfrm_policy_walk_done);
1978 
1979 /*
1980  * Find policy to apply to this flow.
1981  *
1982  * Returns 0 if the policy matches, else a negative errno (-ESRCH means no match).
1983  */
1984 static int xfrm_policy_match(const struct xfrm_policy *pol,
1985 			     const struct flowi *fl,
1986 			     u8 type, u16 family, u32 if_id)
1987 {
1988 	const struct xfrm_selector *sel = &pol->selector;
1989 	int ret = -ESRCH;
1990 	bool match;
1991 
1992 	if (pol->family != family ||
1993 	    pol->if_id != if_id ||
1994 	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
1995 	    pol->type != type)
1996 		return ret;
1997 
1998 	match = xfrm_selector_match(sel, fl, family);
1999 	if (match)
2000 		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid);
2001 	return ret;
2002 }
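
/* Editorial sketch, not part of the original source: callers treat -ESRCH
 * from xfrm_policy_match() as "this policy does not apply, keep scanning"
 * and any other negative value as a hard error from the security hook,
 * roughly:
 *
 *	err = xfrm_policy_match(pol, fl, type, family, if_id);
 *	if (!err)
 *		return pol;
 *	if (err != -ESRCH)
 *		return ERR_PTR(err);
 *	(on -ESRCH, fall through to the next candidate)
 */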
2003 
2004 static struct xfrm_pol_inexact_node *
2005 xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
2006 				seqcount_spinlock_t *count,
2007 				const xfrm_address_t *addr, u16 family)
2008 {
2009 	const struct rb_node *parent;
2010 	int seq;
2011 
2012 again:
2013 	seq = read_seqcount_begin(count);
2014 
2015 	parent = rcu_dereference_raw(r->rb_node);
2016 	while (parent) {
2017 		struct xfrm_pol_inexact_node *node;
2018 		int delta;
2019 
2020 		node = rb_entry(parent, struct xfrm_pol_inexact_node, node);
2021 
2022 		delta = xfrm_policy_addr_delta(addr, &node->addr,
2023 					       node->prefixlen, family);
2024 		if (delta < 0) {
2025 			parent = rcu_dereference_raw(parent->rb_left);
2026 			continue;
2027 		} else if (delta > 0) {
2028 			parent = rcu_dereference_raw(parent->rb_right);
2029 			continue;
2030 		}
2031 
2032 		return node;
2033 	}
2034 
2035 	if (read_seqcount_retry(count, seq))
2036 		goto again;
2037 
2038 	return NULL;
2039 }
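
/* Editorial note: the tree walk above runs locklessly while writers may
 * rebalance the rbtree; rcu_dereference_raw() is tolerable here only
 * because any traversal that raced with a rotation is caught by the
 * read_seqcount_begin()/read_seqcount_retry() pair and simply retried.
 */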
2040 
2041 static bool
2042 xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
2043 				    struct xfrm_pol_inexact_bin *b,
2044 				    const xfrm_address_t *saddr,
2045 				    const xfrm_address_t *daddr)
2046 {
2047 	struct xfrm_pol_inexact_node *n;
2048 	u16 family;
2049 
2050 	if (!b)
2051 		return false;
2052 
2053 	family = b->k.family;
2054 	memset(cand, 0, sizeof(*cand));
2055 	cand->res[XFRM_POL_CAND_ANY] = &b->hhead;
2056 
2057 	n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
2058 					    family);
2059 	if (n) {
2060 		cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
2061 		n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
2062 						    family);
2063 		if (n)
2064 			cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
2065 	}
2066 
2067 	n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
2068 					    family);
2069 	if (n)
2070 		cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;
2071 
2072 	return true;
2073 }
2074 
2075 static struct xfrm_pol_inexact_bin *
2076 xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
2077 			       u8 dir, u32 if_id)
2078 {
2079 	struct xfrm_pol_inexact_key k = {
2080 		.family = family,
2081 		.type = type,
2082 		.dir = dir,
2083 		.if_id = if_id,
2084 	};
2085 
2086 	write_pnet(&k.net, net);
2087 
2088 	return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
2089 				 xfrm_pol_inexact_params);
2090 }
2091 
2092 static struct xfrm_pol_inexact_bin *
2093 xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
2094 			   u8 dir, u32 if_id)
2095 {
2096 	struct xfrm_pol_inexact_bin *bin;
2097 
2098 	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
2099 
2100 	rcu_read_lock();
2101 	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2102 	rcu_read_unlock();
2103 
2104 	return bin;
2105 }
2106 
2107 static struct xfrm_policy *
2108 __xfrm_policy_eval_candidates(struct hlist_head *chain,
2109 			      struct xfrm_policy *prefer,
2110 			      const struct flowi *fl,
2111 			      u8 type, u16 family, u32 if_id)
2112 {
2113 	u32 priority = prefer ? prefer->priority : ~0u;
2114 	struct xfrm_policy *pol;
2115 
2116 	if (!chain)
2117 		return NULL;
2118 
2119 	hlist_for_each_entry_rcu(pol, chain, bydst) {
2120 		int err;
2121 
2122 		if (pol->priority > priority)
2123 			break;
2124 
2125 		err = xfrm_policy_match(pol, fl, type, family, if_id);
2126 		if (err) {
2127 			if (err != -ESRCH)
2128 				return ERR_PTR(err);
2129 
2130 			continue;
2131 		}
2132 
2133 		if (prefer) {
 2134 			/* matches; on a priority tie the entry with the smaller ->pos wins */
2135 			if (pol->priority == priority &&
2136 			    prefer->pos < pol->pos)
2137 				return prefer;
2138 		}
2139 
2140 		return pol;
2141 	}
2142 
2143 	return NULL;
2144 }
2145 
2146 static struct xfrm_policy *
2147 xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
2148 			    struct xfrm_policy *prefer,
2149 			    const struct flowi *fl,
2150 			    u8 type, u16 family, u32 if_id)
2151 {
2152 	struct xfrm_policy *tmp;
2153 	int i;
2154 
2155 	for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
2156 		tmp = __xfrm_policy_eval_candidates(cand->res[i],
2157 						    prefer,
2158 						    fl, type, family, if_id);
2159 		if (!tmp)
2160 			continue;
2161 
2162 		if (IS_ERR(tmp))
2163 			return tmp;
2164 		prefer = tmp;
2165 	}
2166 
2167 	return prefer;
2168 }
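
/* Editorial sketch: the loop above threads the current best match through
 * every candidate list, so after
 *
 *	prefer = NULL;
 *	prefer = __xfrm_policy_eval_candidates(cand->res[0], prefer, ...);
 *	prefer = __xfrm_policy_eval_candidates(cand->res[1], prefer, ...);
 *	...
 *
 * "prefer" ends up holding the match with the lowest priority across all
 * lists, with priority ties broken via ->pos as shown above.
 */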
2169 
2170 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
2171 						     const struct flowi *fl,
2172 						     u16 family, u8 dir,
2173 						     u32 if_id)
2174 {
2175 	struct xfrm_pol_inexact_candidates cand;
2176 	const xfrm_address_t *daddr, *saddr;
2177 	struct xfrm_pol_inexact_bin *bin;
2178 	struct xfrm_policy *pol, *ret;
2179 	struct hlist_head *chain;
2180 	unsigned int sequence;
2181 	int err;
2182 
2183 	daddr = xfrm_flowi_daddr(fl, family);
2184 	saddr = xfrm_flowi_saddr(fl, family);
2185 	if (unlikely(!daddr || !saddr))
2186 		return NULL;
2187 
2188 	rcu_read_lock();
2189  retry:
2190 	do {
2191 		sequence = read_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
2192 		chain = policy_hash_direct(net, daddr, saddr, family, dir);
2193 	} while (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence));
2194 
2195 	ret = NULL;
2196 	hlist_for_each_entry_rcu(pol, chain, bydst) {
2197 		err = xfrm_policy_match(pol, fl, type, family, if_id);
2198 		if (err) {
2199 			if (err == -ESRCH)
2200 				continue;
2201 			else {
2202 				ret = ERR_PTR(err);
2203 				goto fail;
2204 			}
2205 		} else {
2206 			ret = pol;
2207 			break;
2208 		}
2209 	}
2210 	if (ret && ret->xdo.type == XFRM_DEV_OFFLOAD_PACKET)
2211 		goto skip_inexact;
2212 
2213 	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2214 	if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
2215 							 daddr))
2216 		goto skip_inexact;
2217 
2218 	pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
2219 					  family, if_id);
2220 	if (pol) {
2221 		ret = pol;
2222 		if (IS_ERR(pol))
2223 			goto fail;
2224 	}
2225 
2226 skip_inexact:
2227 	if (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence))
2228 		goto retry;
2229 
2230 	if (ret && !xfrm_pol_hold_rcu(ret))
2231 		goto retry;
2232 fail:
2233 	rcu_read_unlock();
2234 
2235 	return ret;
2236 }
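
/* Editorial note: the lookup above restarts from scratch in two cases:
 * when the policy hash tables were resized underneath it (the
 * xfrm_policy_hash_generation seqcount moved), and when the chosen policy
 * was concurrently being freed (xfrm_pol_hold_rcu() failed). Both are
 * rare, so the common case is a single pass under rcu_read_lock().
 */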
2237 
2238 static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
2239 					      const struct flowi *fl,
2240 					      u16 family, u8 dir, u32 if_id)
2241 {
2242 #ifdef CONFIG_XFRM_SUB_POLICY
2243 	struct xfrm_policy *pol;
2244 
2245 	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
2246 					dir, if_id);
2247 	if (pol != NULL)
2248 		return pol;
2249 #endif
2250 	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
2251 					 dir, if_id);
2252 }
2253 
2254 static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
2255 						 const struct flowi *fl,
2256 						 u16 family, u32 if_id)
2257 {
2258 	struct xfrm_policy *pol;
2259 
2260 	rcu_read_lock();
2261  again:
2262 	pol = rcu_dereference(sk->sk_policy[dir]);
2263 	if (pol != NULL) {
2264 		bool match;
2265 		int err = 0;
2266 
2267 		if (pol->family != family) {
2268 			pol = NULL;
2269 			goto out;
2270 		}
2271 
2272 		match = xfrm_selector_match(&pol->selector, fl, family);
2273 		if (match) {
2274 			if ((READ_ONCE(sk->sk_mark) & pol->mark.m) != pol->mark.v ||
2275 			    pol->if_id != if_id) {
2276 				pol = NULL;
2277 				goto out;
2278 			}
2279 			err = security_xfrm_policy_lookup(pol->security,
2280 						      fl->flowi_secid);
2281 			if (!err) {
2282 				if (!xfrm_pol_hold_rcu(pol))
2283 					goto again;
2284 			} else if (err == -ESRCH) {
2285 				pol = NULL;
2286 			} else {
2287 				pol = ERR_PTR(err);
2288 			}
2289 		} else
2290 			pol = NULL;
2291 	}
2292 out:
2293 	rcu_read_unlock();
2294 	return pol;
2295 }
2296 
2297 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
2298 {
2299 	struct net *net = xp_net(pol);
2300 
2301 	list_add(&pol->walk.all, &net->xfrm.policy_all);
2302 	net->xfrm.policy_count[dir]++;
2303 	xfrm_pol_hold(pol);
2304 }
2305 
2306 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
2307 						int dir)
2308 {
2309 	struct net *net = xp_net(pol);
2310 
2311 	if (list_empty(&pol->walk.all))
2312 		return NULL;
2313 
2314 	/* Socket policies are not hashed. */
2315 	if (!hlist_unhashed(&pol->bydst)) {
2316 		hlist_del_rcu(&pol->bydst);
2317 		hlist_del_init(&pol->bydst_inexact_list);
2318 		hlist_del(&pol->byidx);
2319 	}
2320 
2321 	list_del_init(&pol->walk.all);
2322 	net->xfrm.policy_count[dir]--;
2323 
2324 	return pol;
2325 }
2326 
2327 static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
2328 {
2329 	__xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
2330 }
2331 
2332 static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
2333 {
2334 	__xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
2335 }
2336 
2337 int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
2338 {
2339 	struct net *net = xp_net(pol);
2340 
2341 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2342 	pol = __xfrm_policy_unlink(pol, dir);
2343 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2344 	if (pol) {
2345 		xfrm_policy_kill(pol);
2346 		return 0;
2347 	}
2348 	return -ENOENT;
2349 }
2350 EXPORT_SYMBOL(xfrm_policy_delete);
2351 
2352 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
2353 {
2354 	struct net *net = sock_net(sk);
2355 	struct xfrm_policy *old_pol;
2356 
2357 #ifdef CONFIG_XFRM_SUB_POLICY
2358 	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
2359 		return -EINVAL;
2360 #endif
2361 
2362 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2363 	old_pol = rcu_dereference_protected(sk->sk_policy[dir],
2364 				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
2365 	if (pol) {
2366 		pol->curlft.add_time = ktime_get_real_seconds();
2367 		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
2368 		xfrm_sk_policy_link(pol, dir);
2369 	}
2370 	rcu_assign_pointer(sk->sk_policy[dir], pol);
2371 	if (old_pol) {
2372 		if (pol)
2373 			xfrm_policy_requeue(old_pol, pol);
2374 
 2375 		/* Unlinking always succeeds. This is the only function
 2376 		 * allowed to delete or replace a socket policy.
 2377 		 */
2378 		xfrm_sk_policy_unlink(old_pol, dir);
2379 	}
2380 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2381 
2382 	if (old_pol) {
2383 		xfrm_policy_kill(old_pol);
2384 	}
2385 	return 0;
2386 }
2387 
2388 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
2389 {
2390 	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
2391 	struct net *net = xp_net(old);
2392 
2393 	if (newp) {
2394 		newp->selector = old->selector;
2395 		if (security_xfrm_policy_clone(old->security,
2396 					       &newp->security)) {
2397 			kfree(newp);
2398 			return NULL;  /* ENOMEM */
2399 		}
2400 		newp->lft = old->lft;
2401 		newp->curlft = old->curlft;
2402 		newp->mark = old->mark;
2403 		newp->if_id = old->if_id;
2404 		newp->action = old->action;
2405 		newp->flags = old->flags;
2406 		newp->xfrm_nr = old->xfrm_nr;
2407 		newp->index = old->index;
2408 		newp->type = old->type;
2409 		newp->family = old->family;
2410 		memcpy(newp->xfrm_vec, old->xfrm_vec,
2411 		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
2412 		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2413 		xfrm_sk_policy_link(newp, dir);
2414 		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2415 		xfrm_pol_put(newp);
2416 	}
2417 	return newp;
2418 }
2419 
2420 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
2421 {
2422 	const struct xfrm_policy *p;
2423 	struct xfrm_policy *np;
2424 	int i, ret = 0;
2425 
2426 	rcu_read_lock();
2427 	for (i = 0; i < 2; i++) {
2428 		p = rcu_dereference(osk->sk_policy[i]);
2429 		if (p) {
2430 			np = clone_policy(p, i);
2431 			if (unlikely(!np)) {
2432 				ret = -ENOMEM;
2433 				break;
2434 			}
2435 			rcu_assign_pointer(sk->sk_policy[i], np);
2436 		}
2437 	}
2438 	rcu_read_unlock();
2439 	return ret;
2440 }
2441 
2442 static int
2443 xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
2444 	       xfrm_address_t *remote, unsigned short family, u32 mark)
2445 {
2446 	int err;
2447 	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2448 
2449 	if (unlikely(afinfo == NULL))
2450 		return -EINVAL;
2451 	err = afinfo->get_saddr(net, oif, local, remote, mark);
2452 	rcu_read_unlock();
2453 	return err;
2454 }
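
/* Editorial note: xfrm_policy_get_afinfo() returns with the RCU read lock
 * held on success, which is why this function (and the similar helpers
 * below) pair it with a bare rcu_read_unlock() once the afinfo ops have
 * been used.
 */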
2455 
2456 /* Resolve the list of templates for the flow, given the policy. */
2457 
2458 static int
2459 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
2460 		      struct xfrm_state **xfrm, unsigned short family)
2461 {
2462 	struct net *net = xp_net(policy);
2463 	int nx;
2464 	int i, error;
2465 	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
2466 	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
2467 	xfrm_address_t tmp;
2468 
2469 	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
2470 		struct xfrm_state *x;
2471 		xfrm_address_t *remote = daddr;
2472 		xfrm_address_t *local  = saddr;
2473 		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
2474 
2475 		if (tmpl->mode == XFRM_MODE_TUNNEL ||
2476 		    tmpl->mode == XFRM_MODE_BEET) {
2477 			remote = &tmpl->id.daddr;
2478 			local = &tmpl->saddr;
2479 			if (xfrm_addr_any(local, tmpl->encap_family)) {
2480 				error = xfrm_get_saddr(net, fl->flowi_oif,
2481 						       &tmp, remote,
2482 						       tmpl->encap_family, 0);
2483 				if (error)
2484 					goto fail;
2485 				local = &tmp;
2486 			}
2487 		}
2488 
2489 		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
2490 				    family, policy->if_id);
2491 		if (x && x->dir && x->dir != XFRM_SA_DIR_OUT) {
2492 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEDIRERROR);
2493 			xfrm_state_put(x);
2494 			error = -EINVAL;
2495 			goto fail;
2496 		}
2497 
2498 		if (x && x->km.state == XFRM_STATE_VALID) {
2499 			xfrm[nx++] = x;
2500 			daddr = remote;
2501 			saddr = local;
2502 			continue;
2503 		}
2504 		if (x) {
2505 			error = (x->km.state == XFRM_STATE_ERROR ?
2506 				 -EINVAL : -EAGAIN);
2507 			xfrm_state_put(x);
2508 		} else if (error == -ESRCH) {
2509 			error = -EAGAIN;
2510 		}
2511 
2512 		if (!tmpl->optional)
2513 			goto fail;
2514 	}
2515 	return nx;
2516 
2517 fail:
2518 	for (nx--; nx >= 0; nx--)
2519 		xfrm_state_put(xfrm[nx]);
2520 	return error;
2521 }
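
/* Editorial note on the error convention above: a missing state for a
 * mandatory template resolves to -EAGAIN (the KM may still negotiate the
 * SA), a state stuck in XFRM_STATE_ERROR resolves to -EINVAL, and
 * optional templates that cannot be resolved are silently skipped.
 */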
2522 
2523 static int
2524 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
2525 		  struct xfrm_state **xfrm, unsigned short family)
2526 {
2527 	struct xfrm_state *tp[XFRM_MAX_DEPTH];
2528 	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
2529 	int cnx = 0;
2530 	int error;
2531 	int ret;
2532 	int i;
2533 
2534 	for (i = 0; i < npols; i++) {
2535 		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
2536 			error = -ENOBUFS;
2537 			goto fail;
2538 		}
2539 
2540 		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
2541 		if (ret < 0) {
2542 			error = ret;
2543 			goto fail;
2544 		} else
2545 			cnx += ret;
2546 	}
2547 
2548 	/* found states are sorted for outbound processing */
2549 	if (npols > 1)
2550 		xfrm_state_sort(xfrm, tpp, cnx, family);
2551 
2552 	return cnx;
2553 
2554  fail:
2555 	for (cnx--; cnx >= 0; cnx--)
2556 		xfrm_state_put(tpp[cnx]);
2557 	return error;
2558 
2559 }
2560 
2561 static int xfrm_get_tos(const struct flowi *fl, int family)
2562 {
2563 	if (family == AF_INET)
2564 		return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos;
2565 
2566 	return 0;
2567 }
2568 
2569 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
2570 {
2571 	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2572 	struct dst_ops *dst_ops;
2573 	struct xfrm_dst *xdst;
2574 
2575 	if (!afinfo)
2576 		return ERR_PTR(-EINVAL);
2577 
2578 	switch (family) {
2579 	case AF_INET:
2580 		dst_ops = &net->xfrm.xfrm4_dst_ops;
2581 		break;
2582 #if IS_ENABLED(CONFIG_IPV6)
2583 	case AF_INET6:
2584 		dst_ops = &net->xfrm.xfrm6_dst_ops;
2585 		break;
2586 #endif
2587 	default:
2588 		BUG();
2589 	}
2590 	xdst = dst_alloc(dst_ops, NULL, DST_OBSOLETE_NONE, 0);
2591 
2592 	if (likely(xdst)) {
2593 		memset_after(xdst, 0, u.dst);
2594 	} else
2595 		xdst = ERR_PTR(-ENOBUFS);
2596 
2597 	rcu_read_unlock();
2598 
2599 	return xdst;
2600 }
2601 
2602 static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
2603 			   int nfheader_len)
2604 {
2605 	if (dst->ops->family == AF_INET6) {
2606 		path->path_cookie = rt6_get_cookie(dst_rt6_info(dst));
2607 		path->u.rt6.rt6i_nfheader_len = nfheader_len;
2608 	}
2609 }
2610 
2611 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
2612 				const struct flowi *fl)
2613 {
2614 	const struct xfrm_policy_afinfo *afinfo =
2615 		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
2616 	int err;
2617 
2618 	if (!afinfo)
2619 		return -EINVAL;
2620 
2621 	err = afinfo->fill_dst(xdst, dev, fl);
2622 
2623 	rcu_read_unlock();
2624 
2625 	return err;
2626 }
2627 
2628 
2629 /* Allocate a chain of dst_entry's, attach known xfrm's, and calculate
2630  * all the metrics. In short, bundle a bundle.
2631  */
2632 
2633 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
2634 					    struct xfrm_state **xfrm,
2635 					    struct xfrm_dst **bundle,
2636 					    int nx,
2637 					    const struct flowi *fl,
2638 					    struct dst_entry *dst)
2639 {
2640 	const struct xfrm_state_afinfo *afinfo;
2641 	const struct xfrm_mode *inner_mode;
2642 	struct net *net = xp_net(policy);
2643 	unsigned long now = jiffies;
2644 	struct net_device *dev;
2645 	struct xfrm_dst *xdst_prev = NULL;
2646 	struct xfrm_dst *xdst0 = NULL;
2647 	int i = 0;
2648 	int err;
2649 	int header_len = 0;
2650 	int nfheader_len = 0;
2651 	int trailer_len = 0;
2652 	int tos;
2653 	int family = policy->selector.family;
2654 	xfrm_address_t saddr, daddr;
2655 
2656 	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
2657 
2658 	tos = xfrm_get_tos(fl, family);
2659 
2660 	dst_hold(dst);
2661 
2662 	for (; i < nx; i++) {
2663 		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
2664 		struct dst_entry *dst1 = &xdst->u.dst;
2665 
2666 		err = PTR_ERR(xdst);
2667 		if (IS_ERR(xdst)) {
2668 			dst_release(dst);
2669 			goto put_states;
2670 		}
2671 
2672 		bundle[i] = xdst;
2673 		if (!xdst_prev)
2674 			xdst0 = xdst;
2675 		else
 2676 			/* The ref count was taken during xfrm_alloc_dst();
 2677 			 * no need to do dst_clone() on dst1.
 2678 			 */
2679 			xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
2680 
2681 		if (xfrm[i]->sel.family == AF_UNSPEC) {
2682 			inner_mode = xfrm_ip2inner_mode(xfrm[i],
2683 							xfrm_af2proto(family));
2684 			if (!inner_mode) {
2685 				err = -EAFNOSUPPORT;
2686 				dst_release(dst);
2687 				goto put_states;
2688 			}
2689 		} else
2690 			inner_mode = &xfrm[i]->inner_mode;
2691 
2692 		xdst->route = dst;
2693 		dst_copy_metrics(dst1, dst);
2694 
2695 		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
2696 			__u32 mark = 0;
2697 			int oif;
2698 
2699 			if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
2700 				mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
2701 
2702 			if (xfrm[i]->xso.type != XFRM_DEV_OFFLOAD_PACKET)
2703 				family = xfrm[i]->props.family;
2704 
2705 			oif = fl->flowi_oif ? : fl->flowi_l3mdev;
2706 			dst = xfrm_dst_lookup(xfrm[i], tos, oif,
2707 					      &saddr, &daddr, family, mark);
2708 			err = PTR_ERR(dst);
2709 			if (IS_ERR(dst))
2710 				goto put_states;
2711 		} else
2712 			dst_hold(dst);
2713 
2714 		dst1->xfrm = xfrm[i];
2715 		xdst->xfrm_genid = xfrm[i]->genid;
2716 
2717 		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2718 		dst1->lastuse = now;
2719 
2720 		dst1->input = dst_discard;
2721 
2722 		rcu_read_lock();
2723 		afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
2724 		if (likely(afinfo))
2725 			dst1->output = afinfo->output;
2726 		else
2727 			dst1->output = dst_discard_out;
2728 		rcu_read_unlock();
2729 
2730 		xdst_prev = xdst;
2731 
2732 		header_len += xfrm[i]->props.header_len;
2733 		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
2734 			nfheader_len += xfrm[i]->props.header_len;
2735 		trailer_len += xfrm[i]->props.trailer_len;
2736 	}
2737 
2738 	xfrm_dst_set_child(xdst_prev, dst);
2739 	xdst0->path = dst;
2740 
2741 	err = -ENODEV;
2742 	dev = dst->dev;
2743 	if (!dev)
2744 		goto free_dst;
2745 
2746 	xfrm_init_path(xdst0, dst, nfheader_len);
2747 	xfrm_init_pmtu(bundle, nx);
2748 
2749 	for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
2750 	     xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
2751 		err = xfrm_fill_dst(xdst_prev, dev, fl);
2752 		if (err)
2753 			goto free_dst;
2754 
2755 		xdst_prev->u.dst.header_len = header_len;
2756 		xdst_prev->u.dst.trailer_len = trailer_len;
2757 		header_len -= xdst_prev->u.dst.xfrm->props.header_len;
2758 		trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
2759 	}
2760 
2761 	return &xdst0->u.dst;
2762 
2763 put_states:
2764 	for (; i < nx; i++)
2765 		xfrm_state_put(xfrm[i]);
2766 free_dst:
2767 	if (xdst0)
2768 		dst_release_immediate(&xdst0->u.dst);
2769 
2770 	return ERR_PTR(err);
2771 }
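
/* Editorial sketch, not part of the original source: for nx == 2 the
 * function above returns a chain
 *
 *	xdst0 --child--> xdst1 --child--> dst (plain route)
 *
 * with xdst0->u.dst.xfrm = xfrm[0] and xdst1->u.dst.xfrm = xfrm[1].
 * xdst0->path points at the plain route, and every level keeps the
 * ->route it was built from so xfrm_bundle_ok() can revalidate the
 * whole bundle later.
 */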
2772 
2773 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
2774 				struct xfrm_policy **pols,
2775 				int *num_pols, int *num_xfrms)
2776 {
2777 	int i;
2778 
2779 	if (*num_pols == 0 || !pols[0]) {
2780 		*num_pols = 0;
2781 		*num_xfrms = 0;
2782 		return 0;
2783 	}
2784 	if (IS_ERR(pols[0])) {
2785 		*num_pols = 0;
2786 		return PTR_ERR(pols[0]);
2787 	}
2788 
2789 	*num_xfrms = pols[0]->xfrm_nr;
2790 
2791 #ifdef CONFIG_XFRM_SUB_POLICY
2792 	if (pols[0]->action == XFRM_POLICY_ALLOW &&
2793 	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2794 		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
2795 						    XFRM_POLICY_TYPE_MAIN,
2796 						    fl, family,
2797 						    XFRM_POLICY_OUT,
2798 						    pols[0]->if_id);
2799 		if (pols[1]) {
2800 			if (IS_ERR(pols[1])) {
2801 				xfrm_pols_put(pols, *num_pols);
2802 				*num_pols = 0;
2803 				return PTR_ERR(pols[1]);
2804 			}
2805 			(*num_pols)++;
2806 			(*num_xfrms) += pols[1]->xfrm_nr;
2807 		}
2808 	}
2809 #endif
2810 	for (i = 0; i < *num_pols; i++) {
2811 		if (pols[i]->action != XFRM_POLICY_ALLOW) {
2812 			*num_xfrms = -1;
2813 			break;
2814 		}
2815 	}
2816 
2817 	return 0;
2818 
2819 }
2820 
2821 static struct xfrm_dst *
2822 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
2823 			       const struct flowi *fl, u16 family,
2824 			       struct dst_entry *dst_orig)
2825 {
2826 	struct net *net = xp_net(pols[0]);
2827 	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
2828 	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
2829 	struct xfrm_dst *xdst;
2830 	struct dst_entry *dst;
2831 	int err;
2832 
2833 	/* Try to instantiate a bundle */
2834 	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
2835 	if (err <= 0) {
2836 		if (err == 0)
2837 			return NULL;
2838 
2839 		if (err != -EAGAIN)
2840 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2841 		return ERR_PTR(err);
2842 	}
2843 
2844 	dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
2845 	if (IS_ERR(dst)) {
2846 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
2847 		return ERR_CAST(dst);
2848 	}
2849 
2850 	xdst = (struct xfrm_dst *)dst;
2851 	xdst->num_xfrms = err;
2852 	xdst->num_pols = num_pols;
2853 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2854 	xdst->policy_genid = atomic_read(&pols[0]->genid);
2855 
2856 	return xdst;
2857 }
2858 
2859 static void xfrm_policy_queue_process(struct timer_list *t)
2860 {
2861 	struct sk_buff *skb;
2862 	struct sock *sk;
2863 	struct dst_entry *dst;
2864 	struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
2865 	struct net *net = xp_net(pol);
2866 	struct xfrm_policy_queue *pq = &pol->polq;
2867 	struct flowi fl;
2868 	struct sk_buff_head list;
2869 	__u32 skb_mark;
2870 
2871 	spin_lock(&pq->hold_queue.lock);
2872 	skb = skb_peek(&pq->hold_queue);
2873 	if (!skb) {
2874 		spin_unlock(&pq->hold_queue.lock);
2875 		goto out;
2876 	}
2877 	dst = skb_dst(skb);
2878 	sk = skb->sk;
2879 
2880 	/* Fixup the mark to support VTI. */
2881 	skb_mark = skb->mark;
2882 	skb->mark = pol->mark.v;
2883 	xfrm_decode_session(net, skb, &fl, dst->ops->family);
2884 	skb->mark = skb_mark;
2885 	spin_unlock(&pq->hold_queue.lock);
2886 
2887 	dst_hold(xfrm_dst_path(dst));
2888 	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
2889 	if (IS_ERR(dst))
2890 		goto purge_queue;
2891 
2892 	if (dst->flags & DST_XFRM_QUEUE) {
2893 		dst_release(dst);
2894 
2895 		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
2896 			goto purge_queue;
2897 
2898 		pq->timeout = pq->timeout << 1;
2899 		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
2900 			xfrm_pol_hold(pol);
2901 		goto out;
2902 	}
2903 
2904 	dst_release(dst);
2905 
2906 	__skb_queue_head_init(&list);
2907 
2908 	spin_lock(&pq->hold_queue.lock);
2909 	pq->timeout = 0;
2910 	skb_queue_splice_init(&pq->hold_queue, &list);
2911 	spin_unlock(&pq->hold_queue.lock);
2912 
2913 	while (!skb_queue_empty(&list)) {
2914 		skb = __skb_dequeue(&list);
2915 
2916 		/* Fixup the mark to support VTI. */
2917 		skb_mark = skb->mark;
2918 		skb->mark = pol->mark.v;
2919 		xfrm_decode_session(net, skb, &fl, skb_dst(skb)->ops->family);
2920 		skb->mark = skb_mark;
2921 
2922 		dst_hold(xfrm_dst_path(skb_dst(skb)));
2923 		dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
2924 		if (IS_ERR(dst)) {
2925 			kfree_skb(skb);
2926 			continue;
2927 		}
2928 
2929 		nf_reset_ct(skb);
2930 		skb_dst_drop(skb);
2931 		skb_dst_set(skb, dst);
2932 
2933 		dst_output(net, skb->sk, skb);
2934 	}
2935 
2936 out:
2937 	xfrm_pol_put(pol);
2938 	return;
2939 
2940 purge_queue:
2941 	pq->timeout = 0;
2942 	skb_queue_purge(&pq->hold_queue);
2943 	xfrm_pol_put(pol);
2944 }
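
/* Editorial worked example, assuming HZ == 1000: the hold timer starts at
 * XFRM_QUEUE_TMO_MIN (100 ms, set in xdst_queue_output()) and doubles on
 * every run that still lacks a usable bundle: 100 ms, 200 ms, 400 ms, ...
 * Once pq->timeout reaches XFRM_QUEUE_TMO_MAX (60 s), the queue is purged
 * instead of being rescheduled.
 */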
2945 
2946 static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2947 {
2948 	unsigned long sched_next;
2949 	struct dst_entry *dst = skb_dst(skb);
2950 	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
2951 	struct xfrm_policy *pol = xdst->pols[0];
2952 	struct xfrm_policy_queue *pq = &pol->polq;
2953 
2954 	if (unlikely(skb_fclone_busy(sk, skb))) {
2955 		kfree_skb(skb);
2956 		return 0;
2957 	}
2958 
2959 	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
2960 		kfree_skb(skb);
2961 		return -EAGAIN;
2962 	}
2963 
2964 	skb_dst_force(skb);
2965 
2966 	spin_lock_bh(&pq->hold_queue.lock);
2967 
2968 	if (!pq->timeout)
2969 		pq->timeout = XFRM_QUEUE_TMO_MIN;
2970 
2971 	sched_next = jiffies + pq->timeout;
2972 
2973 	if (del_timer(&pq->hold_timer)) {
2974 		if (time_before(pq->hold_timer.expires, sched_next))
2975 			sched_next = pq->hold_timer.expires;
2976 		xfrm_pol_put(pol);
2977 	}
2978 
2979 	__skb_queue_tail(&pq->hold_queue, skb);
2980 	if (!mod_timer(&pq->hold_timer, sched_next))
2981 		xfrm_pol_hold(pol);
2982 
2983 	spin_unlock_bh(&pq->hold_queue.lock);
2984 
2985 	return 0;
2986 }
2987 
2988 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
2989 						 struct xfrm_flo *xflo,
2990 						 const struct flowi *fl,
2991 						 int num_xfrms,
2992 						 u16 family)
2993 {
2994 	int err;
2995 	struct net_device *dev;
2996 	struct dst_entry *dst;
2997 	struct dst_entry *dst1;
2998 	struct xfrm_dst *xdst;
2999 
3000 	xdst = xfrm_alloc_dst(net, family);
3001 	if (IS_ERR(xdst))
3002 		return xdst;
3003 
3004 	if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
3005 	    net->xfrm.sysctl_larval_drop ||
3006 	    num_xfrms <= 0)
3007 		return xdst;
3008 
3009 	dst = xflo->dst_orig;
3010 	dst1 = &xdst->u.dst;
3011 	dst_hold(dst);
3012 	xdst->route = dst;
3013 
3014 	dst_copy_metrics(dst1, dst);
3015 
3016 	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
3017 	dst1->flags |= DST_XFRM_QUEUE;
3018 	dst1->lastuse = jiffies;
3019 
3020 	dst1->input = dst_discard;
3021 	dst1->output = xdst_queue_output;
3022 
3023 	dst_hold(dst);
3024 	xfrm_dst_set_child(xdst, dst);
3025 	xdst->path = dst;
3026 
3027 	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
3028 
3029 	err = -ENODEV;
3030 	dev = dst->dev;
3031 	if (!dev)
3032 		goto free_dst;
3033 
3034 	err = xfrm_fill_dst(xdst, dev, fl);
3035 	if (err)
3036 		goto free_dst;
3037 
3038 out:
3039 	return xdst;
3040 
3041 free_dst:
3042 	dst_release(dst1);
3043 	xdst = ERR_PTR(err);
3044 	goto out;
3045 }
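
/* Editorial note: the "dummy" bundle above carries no transformations.
 * With DST_XFRM_QUEUE set, its output hook is xdst_queue_output(), so
 * packets are parked on pol->polq.hold_queue until the KM installs the
 * missing states and xfrm_policy_queue_process() re-resolves the flow.
 */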
3046 
3047 static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
3048 					   const struct flowi *fl,
3049 					   u16 family, u8 dir,
3050 					   struct xfrm_flo *xflo, u32 if_id)
3051 {
3052 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3053 	int num_pols = 0, num_xfrms = 0, err;
3054 	struct xfrm_dst *xdst;
3055 
 3056 	/* Resolve the policies to use if we couldn't get them from
 3057 	 * a previous cache entry. */
3058 	num_pols = 1;
3059 	pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
3060 	err = xfrm_expand_policies(fl, family, pols,
3061 					   &num_pols, &num_xfrms);
3062 	if (err < 0)
3063 		goto inc_error;
3064 	if (num_pols == 0)
3065 		return NULL;
3066 	if (num_xfrms <= 0)
3067 		goto make_dummy_bundle;
3068 
3069 	xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
3070 					      xflo->dst_orig);
3071 	if (IS_ERR(xdst)) {
3072 		err = PTR_ERR(xdst);
3073 		if (err == -EREMOTE) {
3074 			xfrm_pols_put(pols, num_pols);
3075 			return NULL;
3076 		}
3077 
3078 		if (err != -EAGAIN)
3079 			goto error;
3080 		goto make_dummy_bundle;
3081 	} else if (xdst == NULL) {
3082 		num_xfrms = 0;
3083 		goto make_dummy_bundle;
3084 	}
3085 
3086 	return xdst;
3087 
3088 make_dummy_bundle:
 3089 	/* We found policies, but there are no bundles to instantiate:
 3090 	 * either the policy blocks, it has no transformations, or
 3091 	 * we could not build a template (no xfrm_states). */
3092 	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
3093 	if (IS_ERR(xdst)) {
3094 		xfrm_pols_put(pols, num_pols);
3095 		return ERR_CAST(xdst);
3096 	}
3097 	xdst->num_pols = num_pols;
3098 	xdst->num_xfrms = num_xfrms;
3099 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
3100 
3101 	return xdst;
3102 
3103 inc_error:
3104 	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
3105 error:
3106 	xfrm_pols_put(pols, num_pols);
3107 	return ERR_PTR(err);
3108 }
3109 
3110 static struct dst_entry *make_blackhole(struct net *net, u16 family,
3111 					struct dst_entry *dst_orig)
3112 {
3113 	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
3114 	struct dst_entry *ret;
3115 
3116 	if (!afinfo) {
3117 		dst_release(dst_orig);
3118 		return ERR_PTR(-EINVAL);
3119 	} else {
3120 		ret = afinfo->blackhole_route(net, dst_orig);
3121 	}
3122 	rcu_read_unlock();
3123 
3124 	return ret;
3125 }
3126 
3127 /* Finds/creates a bundle for a given flow and if_id.
3128  *
3129  * At the moment we eat a raw IP route. Mostly to speed up lookups
3130  * on interfaces with disabled IPsec.
3131  *
3132  * xfrm_lookup uses an if_id of 0 by default, and is provided for
 3133  * compatibility.
3134  */
3135 struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
3136 					struct dst_entry *dst_orig,
3137 					const struct flowi *fl,
3138 					const struct sock *sk,
3139 					int flags, u32 if_id)
3140 {
3141 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3142 	struct xfrm_dst *xdst;
3143 	struct dst_entry *dst, *route;
3144 	u16 family = dst_orig->ops->family;
3145 	u8 dir = XFRM_POLICY_OUT;
3146 	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
3147 
3148 	dst = NULL;
3149 	xdst = NULL;
3150 	route = NULL;
3151 
3152 	sk = sk_const_to_full_sk(sk);
3153 	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
3154 		num_pols = 1;
3155 		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
3156 						if_id);
3157 		err = xfrm_expand_policies(fl, family, pols,
3158 					   &num_pols, &num_xfrms);
3159 		if (err < 0)
3160 			goto dropdst;
3161 
3162 		if (num_pols) {
3163 			if (num_xfrms <= 0) {
3164 				drop_pols = num_pols;
3165 				goto no_transform;
3166 			}
3167 
3168 			xdst = xfrm_resolve_and_create_bundle(
3169 					pols, num_pols, fl,
3170 					family, dst_orig);
3171 
3172 			if (IS_ERR(xdst)) {
3173 				xfrm_pols_put(pols, num_pols);
3174 				err = PTR_ERR(xdst);
3175 				if (err == -EREMOTE)
3176 					goto nopol;
3177 
3178 				goto dropdst;
3179 			} else if (xdst == NULL) {
3180 				num_xfrms = 0;
3181 				drop_pols = num_pols;
3182 				goto no_transform;
3183 			}
3184 
3185 			route = xdst->route;
3186 		}
3187 	}
3188 
3189 	if (xdst == NULL) {
3190 		struct xfrm_flo xflo;
3191 
3192 		xflo.dst_orig = dst_orig;
3193 		xflo.flags = flags;
3194 
3195 		/* To accelerate a bit...  */
3196 		if (!if_id && ((dst_orig->flags & DST_NOXFRM) ||
3197 			       !net->xfrm.policy_count[XFRM_POLICY_OUT]))
3198 			goto nopol;
3199 
3200 		xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
3201 		if (xdst == NULL)
3202 			goto nopol;
3203 		if (IS_ERR(xdst)) {
3204 			err = PTR_ERR(xdst);
3205 			goto dropdst;
3206 		}
3207 
3208 		num_pols = xdst->num_pols;
3209 		num_xfrms = xdst->num_xfrms;
3210 		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
3211 		route = xdst->route;
3212 	}
3213 
3214 	dst = &xdst->u.dst;
3215 	if (route == NULL && num_xfrms > 0) {
 3216 		/* The only case when xfrm_bundle_lookup() returns a
 3217 		 * bundle with a null route is when the template could
 3218 		 * not be resolved. It means the policies are there, but
 3219 		 * the bundle could not be created, since we don't yet
 3220 		 * have the xfrm_states. We need to wait for the KM to
 3221 		 * negotiate new SAs or bail out with an error. */
3222 		if (net->xfrm.sysctl_larval_drop) {
3223 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3224 			err = -EREMOTE;
3225 			goto error;
3226 		}
3227 
3228 		err = -EAGAIN;
3229 
3230 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3231 		goto error;
3232 	}
3233 
3234 no_transform:
3235 	if (num_pols == 0)
3236 		goto nopol;
3237 
3238 	if ((flags & XFRM_LOOKUP_ICMP) &&
3239 	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
3240 		err = -ENOENT;
3241 		goto error;
3242 	}
3243 
3244 	for (i = 0; i < num_pols; i++)
3245 		WRITE_ONCE(pols[i]->curlft.use_time, ktime_get_real_seconds());
3246 
3247 	if (num_xfrms < 0) {
3248 		/* Prohibit the flow */
3249 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
3250 		err = -EPERM;
3251 		goto error;
3252 	} else if (num_xfrms > 0) {
3253 		/* Flow transformed */
3254 		dst_release(dst_orig);
3255 	} else {
3256 		/* Flow passes untransformed */
3257 		dst_release(dst);
3258 		dst = dst_orig;
3259 	}
3260 ok:
3261 	xfrm_pols_put(pols, drop_pols);
3262 	if (dst && dst->xfrm &&
3263 	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
3264 		dst->flags |= DST_XFRM_TUNNEL;
3265 	return dst;
3266 
3267 nopol:
3268 	if ((!dst_orig->dev || !(dst_orig->dev->flags & IFF_LOOPBACK)) &&
3269 	    net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
3270 		err = -EPERM;
3271 		goto error;
3272 	}
3273 	if (!(flags & XFRM_LOOKUP_ICMP)) {
3274 		dst = dst_orig;
3275 		goto ok;
3276 	}
3277 	err = -ENOENT;
3278 error:
3279 	dst_release(dst);
3280 dropdst:
3281 	if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
3282 		dst_release(dst_orig);
3283 	xfrm_pols_put(pols, drop_pols);
3284 	return ERR_PTR(err);
3285 }
3286 EXPORT_SYMBOL(xfrm_lookup_with_ifid);
3287 
3288 /* Main function: finds/creates a bundle for a given flow.
3289  *
3290  * At the moment we eat a raw IP route. Mostly to speed up lookups
3291  * on interfaces with disabled IPsec.
3292  */
3293 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
3294 			      const struct flowi *fl, const struct sock *sk,
3295 			      int flags)
3296 {
3297 	return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
3298 }
3299 EXPORT_SYMBOL(xfrm_lookup);
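
/* Editorial sketch, not part of the original source: a hypothetical
 * output-path caller must treat the result as ERR_PTR-or-dst, never NULL:
 *
 *	dst = xfrm_lookup(net, &rt->dst, &fl, sk, 0);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);	(dst_orig was already released)
 *	skb_dst_set(skb, dst);
 *
 * Callers that need to keep their reference on failure pass
 * XFRM_LOOKUP_KEEP_DST_REF, as xfrm_lookup_route() below does.
 */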
3300 
3301 /* Callers of xfrm_lookup_route() must ensure a call to dst_output().
3302  * Otherwise we may send out blackholed packets.
3303  */
3304 struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
3305 				    const struct flowi *fl,
3306 				    const struct sock *sk, int flags)
3307 {
3308 	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
3309 					    flags | XFRM_LOOKUP_QUEUE |
3310 					    XFRM_LOOKUP_KEEP_DST_REF);
3311 
3312 	if (PTR_ERR(dst) == -EREMOTE)
3313 		return make_blackhole(net, dst_orig->ops->family, dst_orig);
3314 
3315 	if (IS_ERR(dst))
3316 		dst_release(dst_orig);
3317 
3318 	return dst;
3319 }
3320 EXPORT_SYMBOL(xfrm_lookup_route);
3321 
3322 static inline int
3323 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
3324 {
3325 	struct sec_path *sp = skb_sec_path(skb);
3326 	struct xfrm_state *x;
3327 
3328 	if (!sp || idx < 0 || idx >= sp->len)
3329 		return 0;
3330 	x = sp->xvec[idx];
3331 	if (!x->type->reject)
3332 		return 0;
3333 	return x->type->reject(x, skb, fl);
3334 }
3335 
3336 /* When the skb is transformed back to its "native" form, we have to
3337  * check policy restrictions. At the moment we do this in a maximally
3338  * stupid way. Shame on me. :-) Of course, connected sockets must
3339  * have their policy cached at them.
3340  */
3341 
3342 static inline int
3343 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
3344 	      unsigned short family, u32 if_id)
3345 {
3346 	if (xfrm_state_kern(x))
3347 		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
3348 	return	x->id.proto == tmpl->id.proto &&
3349 		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
3350 		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
3351 		x->props.mode == tmpl->mode &&
3352 		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
3353 		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
3354 		!(x->props.mode != XFRM_MODE_TRANSPORT &&
3355 		  xfrm_state_addr_cmp(tmpl, x, family)) &&
3356 		(if_id == 0 || if_id == x->if_id);
3357 }
3358 
3359 /*
3360  * A value >= 0 is returned when validation succeeds: either the bypass
3361  * index (because of an optional transport-mode template) or the next
3362  * index after the secpath state that matched the template.
3363  * -1 is returned when no matching template is found.
3364  * Otherwise "-2 - errored_index" is returned.
3365  */
3366 static inline int
3367 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
3368 	       unsigned short family, u32 if_id)
3369 {
3370 	int idx = start;
3371 
3372 	if (tmpl->optional) {
3373 		if (tmpl->mode == XFRM_MODE_TRANSPORT)
3374 			return start;
3375 	} else
3376 		start = -1;
3377 	for (; idx < sp->len; idx++) {
3378 		if (xfrm_state_ok(tmpl, sp->xvec[idx], family, if_id))
3379 			return ++idx;
3380 		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
3381 			if (idx < sp->verified_cnt) {
3382 				/* Secpath entry previously verified, consider optional and
3383 				 * continue searching
3384 				 */
3385 				continue;
3386 			}
3387 
3388 			if (start == -1)
3389 				start = -2-idx;
3390 			break;
3391 		}
3392 	}
3393 	return start;
3394 }
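
/* Editorial worked example: with sp->len == 2 and a template matching
 * sp->xvec[1], a call with start == 1 returns 2, the index to resume the
 * scan from. If sp->xvec[1] were instead an unmatched tunnel-mode state
 * (and not previously verified), the function would return -2 - 1 == -3,
 * encoding the errored index so the caller can derive
 * xerr_idx = -(2 + k) = 1 for xfrm_secpath_reject().
 */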
3395 
3396 static void
3397 decode_session4(const struct xfrm_flow_keys *flkeys, struct flowi *fl, bool reverse)
3398 {
3399 	struct flowi4 *fl4 = &fl->u.ip4;
3400 
3401 	memset(fl4, 0, sizeof(struct flowi4));
3402 
3403 	if (reverse) {
3404 		fl4->saddr = flkeys->addrs.ipv4.dst;
3405 		fl4->daddr = flkeys->addrs.ipv4.src;
3406 		fl4->fl4_sport = flkeys->ports.dst;
3407 		fl4->fl4_dport = flkeys->ports.src;
3408 	} else {
3409 		fl4->saddr = flkeys->addrs.ipv4.src;
3410 		fl4->daddr = flkeys->addrs.ipv4.dst;
3411 		fl4->fl4_sport = flkeys->ports.src;
3412 		fl4->fl4_dport = flkeys->ports.dst;
3413 	}
3414 
3415 	switch (flkeys->basic.ip_proto) {
3416 	case IPPROTO_GRE:
3417 		fl4->fl4_gre_key = flkeys->gre.keyid;
3418 		break;
3419 	case IPPROTO_ICMP:
3420 		fl4->fl4_icmp_type = flkeys->icmp.type;
3421 		fl4->fl4_icmp_code = flkeys->icmp.code;
3422 		break;
3423 	}
3424 
3425 	fl4->flowi4_proto = flkeys->basic.ip_proto;
3426 	fl4->flowi4_tos = flkeys->ip.tos & ~INET_ECN_MASK;
3427 }
3428 
3429 #if IS_ENABLED(CONFIG_IPV6)
3430 static void
3431 decode_session6(const struct xfrm_flow_keys *flkeys, struct flowi *fl, bool reverse)
3432 {
3433 	struct flowi6 *fl6 = &fl->u.ip6;
3434 
3435 	memset(fl6, 0, sizeof(struct flowi6));
3436 
3437 	if (reverse) {
3438 		fl6->saddr = flkeys->addrs.ipv6.dst;
3439 		fl6->daddr = flkeys->addrs.ipv6.src;
3440 		fl6->fl6_sport = flkeys->ports.dst;
3441 		fl6->fl6_dport = flkeys->ports.src;
3442 	} else {
3443 		fl6->saddr = flkeys->addrs.ipv6.src;
3444 		fl6->daddr = flkeys->addrs.ipv6.dst;
3445 		fl6->fl6_sport = flkeys->ports.src;
3446 		fl6->fl6_dport = flkeys->ports.dst;
3447 	}
3448 
3449 	switch (flkeys->basic.ip_proto) {
3450 	case IPPROTO_GRE:
3451 		fl6->fl6_gre_key = flkeys->gre.keyid;
3452 		break;
3453 	case IPPROTO_ICMPV6:
3454 		fl6->fl6_icmp_type = flkeys->icmp.type;
3455 		fl6->fl6_icmp_code = flkeys->icmp.code;
3456 		break;
3457 	}
3458 
3459 	fl6->flowi6_proto = flkeys->basic.ip_proto;
3460 }
3461 #endif
3462 
3463 int __xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl,
3464 			  unsigned int family, int reverse)
3465 {
3466 	struct xfrm_flow_keys flkeys;
3467 
3468 	memset(&flkeys, 0, sizeof(flkeys));
3469 	__skb_flow_dissect(net, skb, &xfrm_session_dissector, &flkeys,
3470 			   NULL, 0, 0, 0, FLOW_DISSECTOR_F_STOP_AT_ENCAP);
3471 
3472 	switch (family) {
3473 	case AF_INET:
3474 		decode_session4(&flkeys, fl, reverse);
3475 		break;
3476 #if IS_ENABLED(CONFIG_IPV6)
3477 	case AF_INET6:
3478 		decode_session6(&flkeys, fl, reverse);
3479 		break;
3480 #endif
3481 	default:
3482 		return -EAFNOSUPPORT;
3483 	}
3484 
3485 	fl->flowi_mark = skb->mark;
3486 	if (reverse) {
3487 		fl->flowi_oif = skb->skb_iif;
3488 	} else {
3489 		int oif = 0;
3490 
3491 		if (skb_dst(skb) && skb_dst(skb)->dev)
3492 			oif = skb_dst(skb)->dev->ifindex;
3493 
3494 		fl->flowi_oif = oif;
3495 	}
3496 
3497 	return security_xfrm_decode_session(skb, &fl->flowi_secid);
3498 }
3499 EXPORT_SYMBOL(__xfrm_decode_session);
3500 
3501 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
3502 {
3503 	for (; k < sp->len; k++) {
3504 		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
3505 			*idxp = k;
3506 			return 1;
3507 		}
3508 	}
3509 
3510 	return 0;
3511 }
3512 
3513 static bool icmp_err_packet(const struct flowi *fl, unsigned short family)
3514 {
3515 	const struct flowi4 *fl4 = &fl->u.ip4;
3516 
3517 	if (family == AF_INET &&
3518 	    fl4->flowi4_proto == IPPROTO_ICMP &&
3519 	    (fl4->fl4_icmp_type == ICMP_DEST_UNREACH ||
3520 	     fl4->fl4_icmp_type == ICMP_TIME_EXCEEDED))
3521 		return true;
3522 
3523 #if IS_ENABLED(CONFIG_IPV6)
3524 	if (family == AF_INET6) {
3525 		const struct flowi6 *fl6 = &fl->u.ip6;
3526 
3527 		if (fl6->flowi6_proto == IPPROTO_ICMPV6 &&
3528 		    (fl6->fl6_icmp_type == ICMPV6_DEST_UNREACH ||
3529 		    fl6->fl6_icmp_type == ICMPV6_PKT_TOOBIG ||
3530 		    fl6->fl6_icmp_type == ICMPV6_TIME_EXCEED))
3531 			return true;
3532 	}
3533 #endif
3534 	return false;
3535 }
3536 
3537 static bool xfrm_icmp_flow_decode(struct sk_buff *skb, unsigned short family,
3538 				  const struct flowi *fl, struct flowi *fl1)
3539 {
3540 	bool ret = true;
3541 	struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
3542 	int hl = family == AF_INET ? (sizeof(struct iphdr) +  sizeof(struct icmphdr)) :
3543 		 (sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr));
3544 
3545 	if (!newskb)
3546 		return true;
3547 
3548 	if (!pskb_pull(newskb, hl))
3549 		goto out;
3550 
3551 	skb_reset_network_header(newskb);
3552 
3553 	if (xfrm_decode_session_reverse(dev_net(skb->dev), newskb, fl1, family) < 0)
3554 		goto out;
3555 
3556 	fl1->flowi_oif = fl->flowi_oif;
3557 	fl1->flowi_mark = fl->flowi_mark;
3558 	fl1->flowi_tos = fl->flowi_tos;
3559 	nf_nat_decode_session(newskb, fl1, family);
3560 	ret = false;
3561 
3562 out:
3563 	consume_skb(newskb);
3564 	return ret;
3565 }
3566 
3567 static bool xfrm_selector_inner_icmp_match(struct sk_buff *skb, unsigned short family,
3568 					   const struct xfrm_selector *sel,
3569 					   const struct flowi *fl)
3570 {
3571 	bool ret = false;
3572 
3573 	if (icmp_err_packet(fl, family)) {
3574 		struct flowi fl1;
3575 
3576 		if (xfrm_icmp_flow_decode(skb, family, fl, &fl1))
3577 			return ret;
3578 
3579 		ret = xfrm_selector_match(sel, &fl1, family);
3580 	}
3581 
3582 	return ret;
3583 }
3584 
3585 static inline struct xfrm_policy *
3586 xfrm_in_fwd_icmp(struct sk_buff *skb,
3587 		 const struct flowi *fl, unsigned short family,
3588 		 u32 if_id)
3589 {
3590 	struct xfrm_policy *pol = NULL;
3591 
3592 	if (icmp_err_packet(fl, family)) {
3593 		struct flowi fl1;
3594 		struct net *net = dev_net(skb->dev);
3595 
3596 		if (xfrm_icmp_flow_decode(skb, family, fl, &fl1))
3597 			return pol;
3598 
3599 		pol = xfrm_policy_lookup(net, &fl1, family, XFRM_POLICY_FWD, if_id);
3600 		if (IS_ERR(pol))
3601 			pol = NULL;
3602 	}
3603 
3604 	return pol;
3605 }
3606 
3607 static inline struct dst_entry *
3608 xfrm_out_fwd_icmp(struct sk_buff *skb, struct flowi *fl,
3609 		  unsigned short family, struct dst_entry *dst)
3610 {
3611 	if (icmp_err_packet(fl, family)) {
3612 		struct net *net = dev_net(skb->dev);
3613 		struct dst_entry *dst2;
3614 		struct flowi fl1;
3615 
3616 		if (xfrm_icmp_flow_decode(skb, family, fl, &fl1))
3617 			return dst;
3618 
3619 		dst_hold(dst);
3620 
3621 		dst2 = xfrm_lookup(net, dst, &fl1, NULL, (XFRM_LOOKUP_QUEUE | XFRM_LOOKUP_ICMP));
3622 
3623 		if (IS_ERR(dst2))
3624 			return dst;
3625 
3626 		if (dst2->xfrm) {
3627 			dst_release(dst);
3628 			dst = dst2;
3629 		} else {
3630 			dst_release(dst2);
3631 		}
3632 	}
3633 
3634 	return dst;
3635 }
3636 
3637 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
3638 			unsigned short family)
3639 {
3640 	struct net *net = dev_net(skb->dev);
3641 	struct xfrm_policy *pol;
3642 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3643 	int npols = 0;
3644 	int xfrm_nr;
3645 	int pi;
3646 	int reverse;
3647 	struct flowi fl;
3648 	int xerr_idx = -1;
3649 	const struct xfrm_if_cb *ifcb;
3650 	struct sec_path *sp;
3651 	u32 if_id = 0;
3652 
3653 	rcu_read_lock();
3654 	ifcb = xfrm_if_get_cb();
3655 
3656 	if (ifcb) {
3657 		struct xfrm_if_decode_session_result r;
3658 
3659 		if (ifcb->decode_session(skb, family, &r)) {
3660 			if_id = r.if_id;
3661 			net = r.net;
3662 		}
3663 	}
3664 	rcu_read_unlock();
3665 
3666 	reverse = dir & ~XFRM_POLICY_MASK;
3667 	dir &= XFRM_POLICY_MASK;
3668 
3669 	if (__xfrm_decode_session(net, skb, &fl, family, reverse) < 0) {
3670 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
3671 		return 0;
3672 	}
3673 
3674 	nf_nat_decode_session(skb, &fl, family);
3675 
3676 	/* First, check the used SAs against their selectors. */
3677 	sp = skb_sec_path(skb);
3678 	if (sp) {
3679 		int i;
3680 
3681 		for (i = sp->len - 1; i >= 0; i--) {
3682 			struct xfrm_state *x = sp->xvec[i];
3683 			int ret = 0;
3684 
3685 			if (!xfrm_selector_match(&x->sel, &fl, family)) {
3686 				ret = 1;
3687 				if (x->props.flags & XFRM_STATE_ICMP &&
3688 				    xfrm_selector_inner_icmp_match(skb, family, &x->sel, &fl))
3689 					ret = 0;
3690 				if (ret) {
3691 					XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
3692 					return 0;
3693 				}
3694 			}
3695 		}
3696 	}
3697 
3698 	pol = NULL;
3699 	sk = sk_to_full_sk(sk);
3700 	if (sk && sk->sk_policy[dir]) {
3701 		pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
3702 		if (IS_ERR(pol)) {
3703 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3704 			return 0;
3705 		}
3706 	}
3707 
3708 	if (!pol)
3709 		pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);
3710 
3711 	if (IS_ERR(pol)) {
3712 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3713 		return 0;
3714 	}
3715 
3716 	if (!pol && dir == XFRM_POLICY_FWD)
3717 		pol = xfrm_in_fwd_icmp(skb, &fl, family, if_id);
3718 
3719 	if (!pol) {
3720 		const bool is_crypto_offload = sp &&
3721 			(xfrm_input_state(skb)->xso.type == XFRM_DEV_OFFLOAD_CRYPTO);
3722 
3723 		if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
3724 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3725 			return 0;
3726 		}
3727 
3728 		if (sp && secpath_has_nontransport(sp, 0, &xerr_idx) && !is_crypto_offload) {
3729 			xfrm_secpath_reject(xerr_idx, skb, &fl);
3730 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3731 			return 0;
3732 		}
3733 		return 1;
3734 	}
3735 
3736 	/* This lockless write can happen from different cpus. */
3737 	WRITE_ONCE(pol->curlft.use_time, ktime_get_real_seconds());
3738 
3739 	pols[0] = pol;
3740 	npols++;
3741 #ifdef CONFIG_XFRM_SUB_POLICY
3742 	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
3743 		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
3744 						    &fl, family,
3745 						    XFRM_POLICY_IN, if_id);
3746 		if (pols[1]) {
3747 			if (IS_ERR(pols[1])) {
3748 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3749 				xfrm_pol_put(pols[0]);
3750 				return 0;
3751 			}
3752 			/* This write can happen from different cpus. */
3753 			WRITE_ONCE(pols[1]->curlft.use_time,
3754 				   ktime_get_real_seconds());
3755 			npols++;
3756 		}
3757 	}
3758 #endif
3759 
3760 	if (pol->action == XFRM_POLICY_ALLOW) {
3761 		static struct sec_path dummy;
3762 		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
3763 		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
3764 		struct xfrm_tmpl **tpp = tp;
3765 		int ti = 0;
3766 		int i, k;
3767 
3768 		sp = skb_sec_path(skb);
3769 		if (!sp)
3770 			sp = &dummy;
3771 
3772 		for (pi = 0; pi < npols; pi++) {
3773 			if (pols[pi] != pol &&
3774 			    pols[pi]->action != XFRM_POLICY_ALLOW) {
3775 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3776 				goto reject;
3777 			}
3778 			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
3779 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
3780 				goto reject_error;
3781 			}
3782 			for (i = 0; i < pols[pi]->xfrm_nr; i++)
3783 				tpp[ti++] = &pols[pi]->xfrm_vec[i];
3784 		}
3785 		xfrm_nr = ti;
3786 
3787 		if (npols > 1) {
3788 			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
3789 			tpp = stp;
3790 		}
3791 
 3792 		/* For each tunnel xfrm, find the first matching tmpl.
 3793 		 * For each tmpl before that, find the corresponding xfrm.
 3794 		 * Order is _important_. Later we will implement
 3795 		 * some barriers, but at the moment barriers
 3796 		 * are implied between every two transformations.
 3797 		 * Upon success, secpath entries are marked as verified,
 3798 		 * allowing them to be skipped in future policy
 3799 		 * checks (e.g. nested tunnels).
 3800 		 */
3801 		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
3802 			k = xfrm_policy_ok(tpp[i], sp, k, family, if_id);
3803 			if (k < 0) {
3804 				if (k < -1)
3805 					/* "-2 - errored_index" returned */
3806 					xerr_idx = -(2+k);
3807 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3808 				goto reject;
3809 			}
3810 		}
3811 
3812 		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
3813 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3814 			goto reject;
3815 		}
3816 
3817 		xfrm_pols_put(pols, npols);
3818 		sp->verified_cnt = k;
3819 
3820 		return 1;
3821 	}
3822 	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3823 
3824 reject:
3825 	xfrm_secpath_reject(xerr_idx, skb, &fl);
3826 reject_error:
3827 	xfrm_pols_put(pols, npols);
3828 	return 0;
3829 }
3830 EXPORT_SYMBOL(__xfrm_policy_check);
3831 
3832 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
3833 {
3834 	struct net *net = dev_net(skb->dev);
3835 	struct flowi fl;
3836 	struct dst_entry *dst;
3837 	int res = 1;
3838 
3839 	if (xfrm_decode_session(net, skb, &fl, family) < 0) {
3840 		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3841 		return 0;
3842 	}
3843 
3844 	skb_dst_force(skb);
3845 	if (!skb_dst(skb)) {
3846 		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3847 		return 0;
3848 	}
3849 
3850 	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
3851 	if (IS_ERR(dst)) {
3852 		res = 0;
3853 		dst = NULL;
3854 	}
3855 
3856 	if (dst && !dst->xfrm)
3857 		dst = xfrm_out_fwd_icmp(skb, &fl, family, dst);
3858 
3859 	skb_dst_set(skb, dst);
3860 	return res;
3861 }
3862 EXPORT_SYMBOL(__xfrm_route_forward);
3863 
3864 /* Optimize later using cookies and generation ids. */
3865 
3866 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
3867 {
 3868 	/* Bundle-creating code (such as xfrm_bundle_create()) sets dst->obsolete
3869 	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
3870 	 * get validated by dst_ops->check on every use.  We do this
3871 	 * because when a normal route referenced by an XFRM dst is
3872 	 * obsoleted we do not go looking around for all parent
3873 	 * referencing XFRM dsts so that we can invalidate them.  It
3874 	 * is just too much work.  Instead we make the checks here on
3875 	 * every use.  For example:
3876 	 *
3877 	 *	XFRM dst A --> IPv4 dst X
3878 	 *
3879 	 * X is the "xdst->route" of A (X is also the "dst->path" of A
3880 	 * in this example).  If X is marked obsolete, "A" will not
3881 	 * notice.  That's what we are validating here via the
3882 	 * stale_bundle() check.
3883 	 *
3884 	 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
3885 	 * be marked on it.
3886 	 * This will force stale_bundle() to fail on any xdst bundle with
3887 	 * this dst linked in it.
3888 	 */
3889 	if (dst->obsolete < 0 && !stale_bundle(dst))
3890 		return dst;
3891 
3892 	return NULL;
3893 }
3894 
3895 static int stale_bundle(struct dst_entry *dst)
3896 {
3897 	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
3898 }
3899 
3900 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
3901 {
3902 	while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
3903 		dst->dev = blackhole_netdev;
3904 		dev_hold(dst->dev);
3905 		dev_put(dev);
3906 	}
3907 }
3908 EXPORT_SYMBOL(xfrm_dst_ifdown);
3909 
3910 static void xfrm_link_failure(struct sk_buff *skb)
3911 {
3912 	/* Impossible. Such dst must be popped before reaches point of failure. */
3913 }
3914 
3915 static void xfrm_negative_advice(struct sock *sk, struct dst_entry *dst)
3916 {
3917 	if (dst->obsolete)
3918 		sk_dst_reset(sk);
3919 }
3920 
3921 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
3922 {
3923 	while (nr--) {
3924 		struct xfrm_dst *xdst = bundle[nr];
3925 		u32 pmtu, route_mtu_cached;
3926 		struct dst_entry *dst;
3927 
3928 		dst = &xdst->u.dst;
3929 		pmtu = dst_mtu(xfrm_dst_child(dst));
3930 		xdst->child_mtu_cached = pmtu;
3931 
3932 		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
3933 
3934 		route_mtu_cached = dst_mtu(xdst->route);
3935 		xdst->route_mtu_cached = route_mtu_cached;
3936 
3937 		if (pmtu > route_mtu_cached)
3938 			pmtu = route_mtu_cached;
3939 
3940 		dst_metric_set(dst, RTAX_MTU, pmtu);
3941 	}
3942 }
3943 
3944 /* Check that all components of the bundle are still valid: live
3945  * states, unchanged generation ids and fresh underlying routes.
3946  */
3947 
3948 static int xfrm_bundle_ok(struct xfrm_dst *first)
3949 {
3950 	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
3951 	struct dst_entry *dst = &first->u.dst;
3952 	struct xfrm_dst *xdst;
3953 	int start_from, nr;
3954 	u32 mtu;
3955 
3956 	if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
3957 	    (dst->dev && !netif_running(dst->dev)))
3958 		return 0;
3959 
3960 	if (dst->flags & DST_XFRM_QUEUE)
3961 		return 1;
3962 
3963 	start_from = nr = 0;
3964 	do {
3965 		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
3966 
3967 		if (dst->xfrm->km.state != XFRM_STATE_VALID)
3968 			return 0;
3969 		if (xdst->xfrm_genid != dst->xfrm->genid)
3970 			return 0;
3971 		if (xdst->num_pols > 0 &&
3972 		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
3973 			return 0;
3974 
3975 		bundle[nr++] = xdst;
3976 
3977 		mtu = dst_mtu(xfrm_dst_child(dst));
3978 		if (xdst->child_mtu_cached != mtu) {
3979 			start_from = nr;
3980 			xdst->child_mtu_cached = mtu;
3981 		}
3982 
3983 		if (!dst_check(xdst->route, xdst->route_cookie))
3984 			return 0;
3985 		mtu = dst_mtu(xdst->route);
3986 		if (xdst->route_mtu_cached != mtu) {
3987 			start_from = nr;
3988 			xdst->route_mtu_cached = mtu;
3989 		}
3990 
3991 		dst = xfrm_dst_child(dst);
3992 	} while (dst->xfrm);
3993 
3994 	if (likely(!start_from))
3995 		return 1;
3996 
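	/* At least one cached MTU changed: re-run the PMTU clamping from
	 * the deepest entry whose cache was refreshed back out to the
	 * top of the bundle.
	 */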
3997 	xdst = bundle[start_from - 1];
3998 	mtu = xdst->child_mtu_cached;
3999 	while (start_from--) {
4000 		dst = &xdst->u.dst;
4001 
4002 		mtu = xfrm_state_mtu(dst->xfrm, mtu);
4003 		if (mtu > xdst->route_mtu_cached)
4004 			mtu = xdst->route_mtu_cached;
4005 		dst_metric_set(dst, RTAX_MTU, mtu);
4006 		if (!start_from)
4007 			break;
4008 
4009 		xdst = bundle[start_from - 1];
4010 		xdst->child_mtu_cached = mtu;
4011 	}
4012 
4013 	return 1;
4014 }
4015 
4016 static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
4017 {
4018 	return dst_metric_advmss(xfrm_dst_path(dst));
4019 }
4020 
4021 static unsigned int xfrm_mtu(const struct dst_entry *dst)
4022 {
4023 	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
4024 
4025 	return mtu ? : dst_mtu(xfrm_dst_path(dst));
4026 }
4027 
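/* Work out which address neighbour resolution should use: skip
 * transport-mode states; for each remaining state take the outer
 * tunnel endpoint, or the care-of address for types that carry a
 * remote co-address (as MIPv6 route optimization does).
 */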
4028 static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
4029 					const void *daddr)
4030 {
4031 	while (dst->xfrm) {
4032 		const struct xfrm_state *xfrm = dst->xfrm;
4033 
4034 		dst = xfrm_dst_child(dst);
4035 
4036 		if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
4037 			continue;
4038 		if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
4039 			daddr = xfrm->coaddr;
4040 		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
4041 			daddr = &xfrm->id.daddr;
4042 	}
4043 	return daddr;
4044 }
4045 
4046 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
4047 					   struct sk_buff *skb,
4048 					   const void *daddr)
4049 {
4050 	const struct dst_entry *path = xfrm_dst_path(dst);
4051 
4052 	if (!skb)
4053 		daddr = xfrm_get_dst_nexthop(dst, daddr);
4054 	return path->ops->neigh_lookup(path, skb, daddr);
4055 }
4056 
4057 static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
4058 {
4059 	const struct dst_entry *path = xfrm_dst_path(dst);
4060 
4061 	daddr = xfrm_get_dst_nexthop(dst, daddr);
4062 	path->ops->confirm_neigh(path, daddr);
4063 }
4064 
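/* Register the per-family policy ops.  Any dst_ops hook the caller
 * left NULL is filled in with the generic xfrm implementation before
 * the afinfo pointer is published under RCU.
 */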
4065 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
4066 {
4067 	int err = 0;
4068 
4069 	if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
4070 		return -EAFNOSUPPORT;
4071 
4072 	spin_lock(&xfrm_policy_afinfo_lock);
4073 	if (unlikely(xfrm_policy_afinfo[family] != NULL))
4074 		err = -EEXIST;
4075 	else {
4076 		struct dst_ops *dst_ops = afinfo->dst_ops;
4077 		if (likely(dst_ops->kmem_cachep == NULL))
4078 			dst_ops->kmem_cachep = xfrm_dst_cache;
4079 		if (likely(dst_ops->check == NULL))
4080 			dst_ops->check = xfrm_dst_check;
4081 		if (likely(dst_ops->default_advmss == NULL))
4082 			dst_ops->default_advmss = xfrm_default_advmss;
4083 		if (likely(dst_ops->mtu == NULL))
4084 			dst_ops->mtu = xfrm_mtu;
4085 		if (likely(dst_ops->negative_advice == NULL))
4086 			dst_ops->negative_advice = xfrm_negative_advice;
4087 		if (likely(dst_ops->link_failure == NULL))
4088 			dst_ops->link_failure = xfrm_link_failure;
4089 		if (likely(dst_ops->neigh_lookup == NULL))
4090 			dst_ops->neigh_lookup = xfrm_neigh_lookup;
4091 		if (likely(!dst_ops->confirm_neigh))
4092 			dst_ops->confirm_neigh = xfrm_confirm_neigh;
4093 		rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
4094 	}
4095 	spin_unlock(&xfrm_policy_afinfo_lock);
4096 
4097 	return err;
4098 }
4099 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
4100 
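/* Unpublish the afinfo and, after the RCU grace period, clear the
 * dst_ops hooks again so the family can be re-registered later.
 */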
4101 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
4102 {
4103 	struct dst_ops *dst_ops = afinfo->dst_ops;
4104 	int i;
4105 
4106 	for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
4107 		if (xfrm_policy_afinfo[i] != afinfo)
4108 			continue;
4109 		RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
4110 		break;
4111 	}
4112 
4113 	synchronize_rcu();
4114 
4115 	dst_ops->kmem_cachep = NULL;
4116 	dst_ops->check = NULL;
4117 	dst_ops->negative_advice = NULL;
4118 	dst_ops->link_failure = NULL;
4119 }
4120 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
4121 
4122 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
4123 {
4124 	spin_lock(&xfrm_if_cb_lock);
4125 	rcu_assign_pointer(xfrm_if_cb, ifcb);
4126 	spin_unlock(&xfrm_if_cb_lock);
4127 }
4128 EXPORT_SYMBOL(xfrm_if_register_cb);
4129 
4130 void xfrm_if_unregister_cb(void)
4131 {
4132 	RCU_INIT_POINTER(xfrm_if_cb, NULL);
4133 	synchronize_rcu();
4134 }
4135 EXPORT_SYMBOL(xfrm_if_unregister_cb);
4136 
4137 #ifdef CONFIG_XFRM_STATISTICS
4138 static int __net_init xfrm_statistics_init(struct net *net)
4139 {
4140 	int rv;
4141 	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
4142 	if (!net->mib.xfrm_statistics)
4143 		return -ENOMEM;
4144 	rv = xfrm_proc_init(net);
4145 	if (rv < 0)
4146 		free_percpu(net->mib.xfrm_statistics);
4147 	return rv;
4148 }
4149 
4150 static void xfrm_statistics_fini(struct net *net)
4151 {
4152 	xfrm_proc_fini(net);
4153 	free_percpu(net->mib.xfrm_statistics);
4154 }
4155 #else
4156 static int __net_init xfrm_statistics_init(struct net *net)
4157 {
4158 	return 0;
4159 }
4160 
4161 static void xfrm_statistics_fini(struct net *net)
4162 {
4163 }
4164 #endif
4165 
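/* Per-netns SPD setup.  The by-index and per-direction bydst hash
 * tables start with 8 buckets, and the hash thresholds default to the
 * full prefix lengths, so initially only fully-specified selectors are
 * hashed; everything else goes through the inexact lists.
 */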
4166 static int __net_init xfrm_policy_init(struct net *net)
4167 {
4168 	unsigned int hmask, sz;
4169 	int dir, err;
4170 
4171 	if (net_eq(net, &init_net)) {
4172 		xfrm_dst_cache = KMEM_CACHE(xfrm_dst, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
4173 		err = rhashtable_init(&xfrm_policy_inexact_table,
4174 				      &xfrm_pol_inexact_params);
4175 		BUG_ON(err);
4176 	}
4177 
4178 	hmask = 8 - 1;
4179 	sz = (hmask+1) * sizeof(struct hlist_head);
4180 
4181 	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
4182 	if (!net->xfrm.policy_byidx)
4183 		goto out_byidx;
4184 	net->xfrm.policy_idx_hmask = hmask;
4185 
4186 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4187 		struct xfrm_policy_hash *htab;
4188 
4189 		net->xfrm.policy_count[dir] = 0;
4190 		net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
4191 		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
4192 
4193 		htab = &net->xfrm.policy_bydst[dir];
4194 		htab->table = xfrm_hash_alloc(sz);
4195 		if (!htab->table)
4196 			goto out_bydst;
4197 		htab->hmask = hmask;
4198 		htab->dbits4 = 32;
4199 		htab->sbits4 = 32;
4200 		htab->dbits6 = 128;
4201 		htab->sbits6 = 128;
4202 	}
4203 	net->xfrm.policy_hthresh.lbits4 = 32;
4204 	net->xfrm.policy_hthresh.rbits4 = 32;
4205 	net->xfrm.policy_hthresh.lbits6 = 128;
4206 	net->xfrm.policy_hthresh.rbits6 = 128;
4207 
4208 	seqlock_init(&net->xfrm.policy_hthresh.lock);
4209 
4210 	INIT_LIST_HEAD(&net->xfrm.policy_all);
4211 	INIT_LIST_HEAD(&net->xfrm.inexact_bins);
4212 	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
4213 	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
4214 	return 0;
4215 
4216 out_bydst:
4217 	for (dir--; dir >= 0; dir--) {
4218 		struct xfrm_policy_hash *htab;
4219 
4220 		htab = &net->xfrm.policy_bydst[dir];
4221 		xfrm_hash_free(htab->table, sz);
4222 	}
4223 	xfrm_hash_free(net->xfrm.policy_byidx, sz);
4224 out_byidx:
4225 	return -ENOMEM;
4226 }
4227 
4228 static void xfrm_policy_fini(struct net *net)
4229 {
4230 	struct xfrm_pol_inexact_bin *b, *t;
4231 	unsigned int sz;
4232 	int dir;
4233 
4234 	flush_work(&net->xfrm.policy_hash_work);
4235 #ifdef CONFIG_XFRM_SUB_POLICY
4236 	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
4237 #endif
4238 	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
4239 
4240 	WARN_ON(!list_empty(&net->xfrm.policy_all));
4241 
4242 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4243 		struct xfrm_policy_hash *htab;
4244 
4245 		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
4246 
4247 		htab = &net->xfrm.policy_bydst[dir];
4248 		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
4249 		WARN_ON(!hlist_empty(htab->table));
4250 		xfrm_hash_free(htab->table, sz);
4251 	}
4252 
4253 	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
4254 	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
4255 	xfrm_hash_free(net->xfrm.policy_byidx, sz);
4256 
4257 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4258 	list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
4259 		__xfrm_policy_inexact_prune_bin(b, true);
4260 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4261 }
4262 
4263 static int __net_init xfrm_net_init(struct net *net)
4264 {
4265 	int rv;
4266 
4267 	/* Initialize the per-net locks here */
4268 	spin_lock_init(&net->xfrm.xfrm_state_lock);
4269 	spin_lock_init(&net->xfrm.xfrm_policy_lock);
4270 	seqcount_spinlock_init(&net->xfrm.xfrm_policy_hash_generation, &net->xfrm.xfrm_policy_lock);
4271 	mutex_init(&net->xfrm.xfrm_cfg_mutex);
4272 	net->xfrm.policy_default[XFRM_POLICY_IN] = XFRM_USERPOLICY_ACCEPT;
4273 	net->xfrm.policy_default[XFRM_POLICY_FWD] = XFRM_USERPOLICY_ACCEPT;
4274 	net->xfrm.policy_default[XFRM_POLICY_OUT] = XFRM_USERPOLICY_ACCEPT;
4275 
4276 	rv = xfrm_statistics_init(net);
4277 	if (rv < 0)
4278 		goto out_statistics;
4279 	rv = xfrm_state_init(net);
4280 	if (rv < 0)
4281 		goto out_state;
4282 	rv = xfrm_policy_init(net);
4283 	if (rv < 0)
4284 		goto out_policy;
4285 	rv = xfrm_sysctl_init(net);
4286 	if (rv < 0)
4287 		goto out_sysctl;
4288 
4289 	rv = xfrm_nat_keepalive_net_init(net);
4290 	if (rv < 0)
4291 		goto out_nat_keepalive;
4292 
4293 	return 0;
4294 
4295 out_nat_keepalive:
4296 	xfrm_sysctl_fini(net);
4297 out_sysctl:
4298 	xfrm_policy_fini(net);
4299 out_policy:
4300 	xfrm_state_fini(net);
4301 out_state:
4302 	xfrm_statistics_fini(net);
4303 out_statistics:
4304 	return rv;
4305 }
4306 
4307 static void __net_exit xfrm_net_exit(struct net *net)
4308 {
4309 	xfrm_nat_keepalive_net_fini(net);
4310 	xfrm_sysctl_fini(net);
4311 	xfrm_policy_fini(net);
4312 	xfrm_state_fini(net);
4313 	xfrm_statistics_fini(net);
4314 }
4315 
4316 static struct pernet_operations __net_initdata xfrm_net_ops = {
4317 	.init = xfrm_net_init,
4318 	.exit = xfrm_net_exit,
4319 };
4320 
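/* The keys xfrm_session_dissector extracts from a packet when decoding
 * a flow for policy lookup.
 */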
4321 static const struct flow_dissector_key xfrm_flow_dissector_keys[] = {
4322 	{
4323 		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
4324 		.offset = offsetof(struct xfrm_flow_keys, control),
4325 	},
4326 	{
4327 		.key_id = FLOW_DISSECTOR_KEY_BASIC,
4328 		.offset = offsetof(struct xfrm_flow_keys, basic),
4329 	},
4330 	{
4331 		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
4332 		.offset = offsetof(struct xfrm_flow_keys, addrs.ipv4),
4333 	},
4334 	{
4335 		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
4336 		.offset = offsetof(struct xfrm_flow_keys, addrs.ipv6),
4337 	},
4338 	{
4339 		.key_id = FLOW_DISSECTOR_KEY_PORTS,
4340 		.offset = offsetof(struct xfrm_flow_keys, ports),
4341 	},
4342 	{
4343 		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
4344 		.offset = offsetof(struct xfrm_flow_keys, gre),
4345 	},
4346 	{
4347 		.key_id = FLOW_DISSECTOR_KEY_IP,
4348 		.offset = offsetof(struct xfrm_flow_keys, ip),
4349 	},
4350 	{
4351 		.key_id = FLOW_DISSECTOR_KEY_ICMP,
4352 		.offset = offsetof(struct xfrm_flow_keys, icmp),
4353 	},
4354 };
4355 
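/* Boot-time initialization of the transform framework: session flow
 * dissector, per-netns data, device and input layer hooks, and the
 * optional espintcp, bpf and NAT keepalive support.
 */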
4356 void __init xfrm_init(void)
4357 {
4358 	skb_flow_dissector_init(&xfrm_session_dissector,
4359 				xfrm_flow_dissector_keys,
4360 				ARRAY_SIZE(xfrm_flow_dissector_keys));
4361 
4362 	register_pernet_subsys(&xfrm_net_ops);
4363 	xfrm_dev_init();
4364 	xfrm_input_init();
4365 
4366 #ifdef CONFIG_XFRM_ESPINTCP
4367 	espintcp_init();
4368 #endif
4369 
4370 	register_xfrm_state_bpf();
4371 	xfrm_nat_keepalive_init(AF_INET);
4372 }
4373 
4374 #ifdef CONFIG_AUDITSYSCALL
4375 static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
4376 					 struct audit_buffer *audit_buf)
4377 {
4378 	struct xfrm_sec_ctx *ctx = xp->security;
4379 	struct xfrm_selector *sel = &xp->selector;
4380 
4381 	if (ctx)
4382 		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
4383 				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
4384 
4385 	switch (sel->family) {
4386 	case AF_INET:
4387 		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
4388 		if (sel->prefixlen_s != 32)
4389 			audit_log_format(audit_buf, " src_prefixlen=%d",
4390 					 sel->prefixlen_s);
4391 		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
4392 		if (sel->prefixlen_d != 32)
4393 			audit_log_format(audit_buf, " dst_prefixlen=%d",
4394 					 sel->prefixlen_d);
4395 		break;
4396 	case AF_INET6:
4397 		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
4398 		if (sel->prefixlen_s != 128)
4399 			audit_log_format(audit_buf, " src_prefixlen=%d",
4400 					 sel->prefixlen_s);
4401 		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
4402 		if (sel->prefixlen_d != 128)
4403 			audit_log_format(audit_buf, " dst_prefixlen=%d",
4404 					 sel->prefixlen_d);
4405 		break;
4406 	}
4407 }
4408 
4409 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
4410 {
4411 	struct audit_buffer *audit_buf;
4412 
4413 	audit_buf = xfrm_audit_start("SPD-add");
4414 	if (audit_buf == NULL)
4415 		return;
4416 	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4417 	audit_log_format(audit_buf, " res=%u", result);
4418 	xfrm_audit_common_policyinfo(xp, audit_buf);
4419 	audit_log_end(audit_buf);
4420 }
4421 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
4422 
4423 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
4424 			      bool task_valid)
4425 {
4426 	struct audit_buffer *audit_buf;
4427 
4428 	audit_buf = xfrm_audit_start("SPD-delete");
4429 	if (audit_buf == NULL)
4430 		return;
4431 	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4432 	audit_log_format(audit_buf, " res=%u", result);
4433 	xfrm_audit_common_policyinfo(xp, audit_buf);
4434 	audit_log_end(audit_buf);
4435 }
4436 EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
4437 #endif
4438 
4439 #ifdef CONFIG_XFRM_MIGRATE
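/* With a wildcard upper-layer protocol the selectors match on family,
 * addresses and prefix lengths alone; otherwise they must be identical
 * byte for byte.
 */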
4440 static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
4441 					const struct xfrm_selector *sel_tgt)
4442 {
4443 	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
4444 		if (sel_tgt->family == sel_cmp->family &&
4445 		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
4446 				    sel_cmp->family) &&
4447 		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
4448 				    sel_cmp->family) &&
4449 		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
4450 		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
4451 			return true;
4452 		}
4453 	} else {
4454 		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
4455 			return true;
4456 		}
4457 	}
4458 	return false;
4459 }
4460 
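/* Locate the policy targeted by a migrate request: check the exact
 * bydst hash first, then let an inexact policy win if it has a
 * strictly better priority.
 */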
4461 static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
4462 						    u8 dir, u8 type, struct net *net, u32 if_id)
4463 {
4464 	struct xfrm_policy *pol, *ret = NULL;
4465 	struct hlist_head *chain;
4466 	u32 priority = ~0U;
4467 
4468 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4469 	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
4470 	hlist_for_each_entry(pol, chain, bydst) {
4471 		if ((if_id == 0 || pol->if_id == if_id) &&
4472 		    xfrm_migrate_selector_match(sel, &pol->selector) &&
4473 		    pol->type == type) {
4474 			ret = pol;
4475 			priority = ret->priority;
4476 			break;
4477 		}
4478 	}
4479 	chain = &net->xfrm.policy_inexact[dir];
4480 	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
4481 		if ((pol->priority >= priority) && ret)
4482 			break;
4483 
4484 		if ((if_id == 0 || pol->if_id == if_id) &&
4485 		    xfrm_migrate_selector_match(sel, &pol->selector) &&
4486 		    pol->type == type) {
4487 			ret = pol;
4488 			break;
4489 		}
4490 	}
4491 
4492 	xfrm_pol_hold(ret);
4493 
4494 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4495 
4496 	return ret;
4497 }
4498 
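/* Does migrate entry @m describe template @t?  Mode, protocol and
 * (when one was given) reqid must agree; tunnel and BEET templates
 * must also carry the old endpoint addresses.
 */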
4499 static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
4500 {
4501 	int match = 0;
4502 
4503 	if (t->mode == m->mode && t->id.proto == m->proto &&
4504 	    (m->reqid == 0 || t->reqid == m->reqid)) {
4505 		switch (t->mode) {
4506 		case XFRM_MODE_TUNNEL:
4507 		case XFRM_MODE_BEET:
4508 			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
4509 					    m->old_family) &&
4510 			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
4511 					    m->old_family)) {
4512 				match = 1;
4513 			}
4514 			break;
4515 		case XFRM_MODE_TRANSPORT:
4516 			/* In transport mode the template stores no IP addresses,
4517 			 * so matching mode and protocol is sufficient.
4518 			 */
4519 			match = 1;
4520 			break;
4521 		default:
4522 			break;
4523 		}
4524 	}
4525 	return match;
4526 }
4527 
4528 /* update endpoint address(es) of template(s) */
4529 static int xfrm_policy_migrate(struct xfrm_policy *pol,
4530 			       struct xfrm_migrate *m, int num_migrate,
4531 			       struct netlink_ext_ack *extack)
4532 {
4533 	struct xfrm_migrate *mp;
4534 	int i, j, n = 0;
4535 
4536 	write_lock_bh(&pol->lock);
4537 	if (unlikely(pol->walk.dead)) {
4538 		/* target policy has been deleted */
4539 		NL_SET_ERR_MSG(extack, "Target policy not found");
4540 		write_unlock_bh(&pol->lock);
4541 		return -ENOENT;
4542 	}
4543 
4544 	for (i = 0; i < pol->xfrm_nr; i++) {
4545 		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
4546 			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
4547 				continue;
4548 			n++;
4549 			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
4550 			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
4551 				continue;
4552 			/* update endpoints */
4553 			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
4554 			       sizeof(pol->xfrm_vec[i].id.daddr));
4555 			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
4556 			       sizeof(pol->xfrm_vec[i].saddr));
4557 			pol->xfrm_vec[i].encap_family = mp->new_family;
4558 			/* flush bundles */
4559 			atomic_inc(&pol->genid);
4560 		}
4561 	}
4562 
4563 	write_unlock_bh(&pol->lock);
4564 
4565 	if (!n)
4566 		return -ENODATA;
4567 
4568 	return 0;
4569 }
4570 
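/* Validate a migrate request: a bounded list of entries with non-null
 * new endpoints and no duplicated old-address/proto/mode/reqid tuples.
 */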
4571 static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate,
4572 			      struct netlink_ext_ack *extack)
4573 {
4574 	int i, j;
4575 
4576 	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH) {
4577 		NL_SET_ERR_MSG(extack, "Invalid number of SAs to migrate, must be 0 < num <= XFRM_MAX_DEPTH (6)");
4578 		return -EINVAL;
4579 	}
4580 
4581 	for (i = 0; i < num_migrate; i++) {
4582 		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
4583 		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family)) {
4584 			NL_SET_ERR_MSG(extack, "Addresses in the MIGRATE attribute's list cannot be null");
4585 			return -EINVAL;
4586 		}
4587 
4588 		/* check if there is any duplicated entry */
4589 		for (j = i + 1; j < num_migrate; j++) {
4590 			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
4591 				    sizeof(m[i].old_daddr)) &&
4592 			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
4593 				    sizeof(m[i].old_saddr)) &&
4594 			    m[i].proto == m[j].proto &&
4595 			    m[i].mode == m[j].mode &&
4596 			    m[i].reqid == m[j].reqid &&
4597 			    m[i].old_family == m[j].old_family) {
4598 				NL_SET_ERR_MSG(extack, "Entries in the MIGRATE attribute's list must be unique");
4599 				return -EINVAL;
4600 			}
4601 		}
4602 	}
4603 
4604 	return 0;
4605 }
4606 
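/* Handle a MIGRATE request: validate it, find the target policy, clone
 * every matching state onto the new endpoints, rewrite the policy
 * templates, delete the old states and finally tell the key managers.
 * A failure in any stage rolls back the states created so far.
 */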
4607 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
4608 		 struct xfrm_migrate *m, int num_migrate,
4609 		 struct xfrm_kmaddress *k, struct net *net,
4610 		 struct xfrm_encap_tmpl *encap, u32 if_id,
4611 		 struct netlink_ext_ack *extack)
4612 {
4613 	int i, err, nx_cur = 0, nx_new = 0;
4614 	struct xfrm_policy *pol = NULL;
4615 	struct xfrm_state *x, *xc;
4616 	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
4617 	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
4618 	struct xfrm_migrate *mp;
4619 
4620 	/* Stage 0 - sanity checks */
4621 	err = xfrm_migrate_check(m, num_migrate, extack);
4622 	if (err < 0)
4623 		goto out;
4624 
4625 	if (dir >= XFRM_POLICY_MAX) {
4626 		NL_SET_ERR_MSG(extack, "Invalid policy direction");
4627 		err = -EINVAL;
4628 		goto out;
4629 	}
4630 
4631 	/* Stage 1 - find policy */
4632 	pol = xfrm_migrate_policy_find(sel, dir, type, net, if_id);
4633 	if (!pol) {
4634 		NL_SET_ERR_MSG(extack, "Target policy not found");
4635 		err = -ENOENT;
4636 		goto out;
4637 	}
4638 
4639 	/* Stage 2 - find and update state(s) */
4640 	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
4641 		if ((x = xfrm_migrate_state_find(mp, net, if_id))) {
4642 			x_cur[nx_cur] = x;
4643 			nx_cur++;
4644 			xc = xfrm_state_migrate(x, mp, encap);
4645 			if (xc) {
4646 				x_new[nx_new] = xc;
4647 				nx_new++;
4648 			} else {
4649 				err = -ENODATA;
4650 				goto restore_state;
4651 			}
4652 		}
4653 	}
4654 
4655 	/* Stage 3 - update policy */
4656 	err = xfrm_policy_migrate(pol, m, num_migrate, extack);
4657 	if (err < 0)
4658 		goto restore_state;
4659 
4660 	/* Stage 4 - delete old state(s) */
4661 	if (nx_cur) {
4662 		xfrm_states_put(x_cur, nx_cur);
4663 		xfrm_states_delete(x_cur, nx_cur);
4664 	}
4665 
4666 	/* Stage 5 - announce */
4667 	km_migrate(sel, dir, type, m, num_migrate, k, encap);
4668 
4669 	xfrm_pol_put(pol);
4670 
4671 	return 0;
4672 out:
4673 	return err;
4674 
4675 restore_state:
4676 	if (pol)
4677 		xfrm_pol_put(pol);
4678 	if (nx_cur)
4679 		xfrm_states_put(x_cur, nx_cur);
4680 	if (nx_new)
4681 		xfrm_states_delete(x_new, nx_new);
4682 
4683 	return err;
4684 }
4685 EXPORT_SYMBOL(xfrm_migrate);
4686 #endif
4687