// SPDX-License-Identifier: GPL-2.0-only
/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 * 	Kazunori MIYAZAWA @USAGI
 * 	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 * 		IPv6 support
 * 	Kazunori MIYAZAWA @USAGI
 * 	YOSHIFUJI Hideaki
 * 		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/cpu.h>
#include <linux/audit.h>
#include <linux/rhashtable.h>
#include <linux/if_tunnel.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/xfrm.h>
#include <net/ip.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/mip6.h>
#endif
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif

#include "xfrm_hash.h"

#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
#define XFRM_MAX_QUEUE_LEN	100

struct xfrm_flo {
	struct dst_entry *dst_orig;
	u8 flags;
};

/* prefixes smaller than this are stored in lists, not trees. */
#define INEXACT_PREFIXLEN_IPV4	16
#define INEXACT_PREFIXLEN_IPV6	48

struct xfrm_pol_inexact_node {
	struct rb_node node;
	union {
		xfrm_address_t addr;
		struct rcu_head rcu;
	};
	u8 prefixlen;

	struct rb_root root;

	/* the policies matching this node, can be empty list */
	struct hlist_head hhead;
};

/* xfrm inexact policy search tree:
 * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
 *  |
 * +---- root_d: sorted by daddr:prefix
 * |                 |
 * |        xfrm_pol_inexact_node
 * |                 |
 * |                 +- root: sorted by saddr/prefix
 * |                 |              |
 * |                 |         xfrm_pol_inexact_node
 * |                 |              |
 * |                 |              + root: unused
 * |                 |              |
 * |                 |              + hhead: saddr:daddr policies
 * |                 |
 * |                 +- coarse policies and all any:daddr policies
 * |
 * +---- root_s: sorted by saddr:prefix
 * |                 |
 * |        xfrm_pol_inexact_node
 * |                 |
 * |                 + root: unused
 * |                 |
 * |                 + hhead: saddr:any policies
 * |
 * +---- coarse policies and all any:any policies
 *
 * Lookups return four candidate lists:
 * 1. any:any list from top-level xfrm_pol_inexact_bin
 * 2. any:daddr list from daddr tree
 * 3. saddr:daddr list from 2nd level daddr tree
 * 4. saddr:any list from saddr tree
 *
 * This result set then needs to be searched for the policy with
 * the lowest priority.  If two results have same prio, youngest one wins.
 */
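/* For instance, with INEXACT_PREFIXLEN_IPV4 == 16 a policy with
 * selector saddr 10.0.0.0/8 and daddr 192.0.2.0/24 treats saddr as
 * "any" (8 < 16) and sits on the hhead list of the 192.0.2.0/24 node
 * in root_d (candidate list 2 above); a policy with saddr
 * 10.10.0.0/16 and the same daddr instead lands in that node's
 * second-level saddr subtree (candidate list 3).
 */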

struct xfrm_pol_inexact_key {
	possible_net_t net;
	u32 if_id;
	u16 family;
	u8 dir, type;
};

struct xfrm_pol_inexact_bin {
	struct xfrm_pol_inexact_key k;
	struct rhash_head head;
	/* list containing '*:*' policies */
	struct hlist_head hhead;

	seqcount_t count;
	/* tree sorted by daddr/prefix */
	struct rb_root root_d;

	/* tree sorted by saddr/prefix */
	struct rb_root root_s;

	/* slow path below */
	struct list_head inexact_bins;
	struct rcu_head rcu;
};

enum xfrm_pol_inexact_candidate_type {
	XFRM_POL_CAND_BOTH,
	XFRM_POL_CAND_SADDR,
	XFRM_POL_CAND_DADDR,
	XFRM_POL_CAND_ANY,

	XFRM_POL_CAND_MAX,
};

struct xfrm_pol_inexact_candidates {
	struct hlist_head *res[XFRM_POL_CAND_MAX];
};

static DEFINE_SPINLOCK(xfrm_if_cb_lock);
static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;

static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
						__read_mostly;

static struct kmem_cache *xfrm_dst_cache __ro_after_init;
static __read_mostly seqcount_t xfrm_policy_hash_generation;

static struct rhashtable xfrm_policy_inexact_table;
static const struct rhashtable_params xfrm_pol_inexact_params;

static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static void xfrm_policy_queue_process(struct timer_list *t);

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
			   u32 if_id);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup_rcu(struct net *net,
			       u8 type, u16 family, u8 dir, u32 if_id);
static struct xfrm_policy *
xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
			bool excl);
static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy);

static bool
xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
				    struct xfrm_pol_inexact_bin *b,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr);

static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
{
	return refcount_inc_not_zero(&policy->refcnt);
}

static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}

static inline bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}

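/* Check whether @fl matches @sel for the given address family.  For
 * example, an IPv4 selector with daddr 192.0.2.0/24, dport 80 and
 * dport_mask 0xffff matches a flow towards 192.0.2.10:80, provided
 * proto and ifindex are either zero (wildcard) in the selector or
 * identical in the flow.
 */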
bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
			 unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return false;
}

static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	const struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
		return NULL;
	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

/* Called with rcu_read_lock(). */
static const struct xfrm_if_cb *xfrm_if_get_cb(void)
{
	return rcu_dereference(xfrm_if_cb);
}

struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr,
				    int family, u32 mark)
{
	const struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);

	rcu_read_unlock();

	return dst;
}
EXPORT_SYMBOL(__xfrm_dst_lookup);

static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
						int tos, int oif,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family, u32 mark)
{
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		saddr = x->coaddr;
		daddr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		saddr = prev_saddr;
		daddr = x->coaddr;
	}

	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr,  sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr,  sizeof(*prev_daddr));
	}

	return dst;
}

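/* Convert a relative timeout in seconds to jiffies, clamped so the
 * multiplication by HZ cannot overflow: anything at or above
 * (MAX_SCHEDULE_TIMEOUT - 1) / HZ maps to MAX_SCHEDULE_TIMEOUT - 1.
 */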
static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}

static void xfrm_policy_timer(struct timer_list *t)
{
	struct xfrm_policy *xp = from_timer(xp, t, timer);
	time64_t now = ktime_get_real_seconds();
	time64_t next = TIME64_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (unlikely(xp->walk.dead))
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		time64_t tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		time64_t tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		time64_t tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		time64_t tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != TIME64_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}

/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst_inexact_list);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		refcount_set(&policy->refcnt, 1);
		skb_queue_head_init(&policy->polq.hold_queue);
		timer_setup(&policy->timer, xfrm_policy_timer, 0);
		timer_setup(&policy->polq.hold_timer,
			    xfrm_policy_queue_process, 0);
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

static void xfrm_policy_destroy_rcu(struct rcu_head *head)
{
	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}

/* Destroy xfrm_policy: descendant resources must have been released
 * by this point.
 */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);

	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
		BUG();

	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
}
EXPORT_SYMBOL(xfrm_policy_destroy);

/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must already be unlinked from all lists
 * by this point.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	policy->walk.dead = 1;

	atomic_inc(&policy->genid);

	if (del_timer(&policy->polq.hold_timer))
		xfrm_pol_put(policy);
	skb_queue_purge(&policy->polq.hold_queue);

	if (del_timer(&policy->timer))
		xfrm_pol_put(policy);

	xfrm_pol_put(policy);
}

static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}

/* calculate policy hash thresholds */
static void __get_hash_thresh(struct net *net,
			      unsigned short family, int dir,
			      u8 *dbits, u8 *sbits)
{
	switch (family) {
	case AF_INET:
		*dbits = net->xfrm.policy_bydst[dir].dbits4;
		*sbits = net->xfrm.policy_bydst[dir].sbits4;
		break;

	case AF_INET6:
		*dbits = net->xfrm.policy_bydst[dir].dbits6;
		*sbits = net->xfrm.policy_bydst[dir].sbits6;
		break;

	default:
		*dbits = 0;
		*sbits = 0;
	}
}

static struct hlist_head *policy_hash_bysel(struct net *net,
					    const struct xfrm_selector *sel,
					    unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __sel_hash(sel, family, hmask, dbits, sbits);

	if (hash == hmask + 1)
		return NULL;

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}

static struct hlist_head *policy_hash_direct(struct net *net,
					     const xfrm_address_t *daddr,
					     const xfrm_address_t *saddr,
					     unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}

static void xfrm_dst_hash_transfer(struct net *net,
				   struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask,
				   int dir)
{
	struct hlist_node *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;
	u8 dbits;
	u8 sbits;

redo:
	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
		unsigned int h;

		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask, dbits, sbits);
		if (!entry0) {
			hlist_del_rcu(&pol->bydst);
			hlist_add_head_rcu(&pol->bydst, ndsttable + h);
			h0 = h;
		} else {
			if (h != h0)
				continue;
			hlist_del_rcu(&pol->bydst);
			hlist_add_behind_rcu(&pol->bydst, entry0);
		}
		entry0 = &pol->bydst;
	}
	if (!hlist_empty(list)) {
		entry0 = NULL;
		goto redo;
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

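/* Grow a policy hash table by doubling its bucket count, e.g. an old
 * hmask of 15 (16 buckets) becomes 31 (32 buckets).
 */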
static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}

static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	struct hlist_head *odst;
	int i;

	if (!ndst)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&xfrm_policy_hash_generation);

	odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
				lockdep_is_held(&net->xfrm.xfrm_policy_lock));

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);

	rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_seqcount_end(&xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	synchronize_rcu();

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = net->xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
}
EXPORT_SYMBOL(xfrm_spd_getinfo);

static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	}
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net, total);

	mutex_unlock(&hash_resize_mutex);
}

/* Make sure *pol can be inserted into fastbin.
 * Useful to check that later insert requests will be successful
 * (provided xfrm_policy_lock is held throughout).
 */
static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
{
	struct xfrm_pol_inexact_bin *bin, *prev;
	struct xfrm_pol_inexact_key k = {
		.family = pol->family,
		.type = pol->type,
		.dir = dir,
		.if_id = pol->if_id,
	};
	struct net *net = xp_net(pol);

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	write_pnet(&k.net, net);
	bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
				     xfrm_pol_inexact_params);
	if (bin)
		return bin;

	bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
	if (!bin)
		return NULL;

	bin->k = k;
	INIT_HLIST_HEAD(&bin->hhead);
	bin->root_d = RB_ROOT;
	bin->root_s = RB_ROOT;
	seqcount_init(&bin->count);

	prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
						&bin->k, &bin->head,
						xfrm_pol_inexact_params);
	if (!prev) {
		list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
		return bin;
	}

	kfree(bin);

	return IS_ERR(prev) ? NULL : prev;
}

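/* Decide whether @addr should live on a plain list rather than in an
 * rb-tree node: wildcard addresses and prefixes shorter than
 * INEXACT_PREFIXLEN_IPV4/IPV6 qualify, so e.g. an IPv4 /12 selector
 * (12 < 16) is kept on a list.
 */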
static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
					       int family, u8 prefixlen)
{
	if (xfrm_addr_any(addr, family))
		return true;

	if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
		return true;

	if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
		return true;

	return false;
}

static bool
xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
{
	const xfrm_address_t *addr;
	bool saddr_any, daddr_any;
	u8 prefixlen;

	addr = &policy->selector.saddr;
	prefixlen = policy->selector.prefixlen_s;

	saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
						       policy->family,
						       prefixlen);
	addr = &policy->selector.daddr;
	prefixlen = policy->selector.prefixlen_d;
	daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
						       policy->family,
						       prefixlen);
	return saddr_any && daddr_any;
}

static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
				       const xfrm_address_t *addr, u8 prefixlen)
{
	node->addr = *addr;
	node->prefixlen = prefixlen;
}

static struct xfrm_pol_inexact_node *
xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
{
	struct xfrm_pol_inexact_node *node;

	node = kzalloc(sizeof(*node), GFP_ATOMIC);
	if (node)
		xfrm_pol_inexact_node_init(node, addr, prefixlen);

	return node;
}

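/* Compare @a and @b under @prefixlen; returns a negative value, zero
 * or a positive value so the result can order rb-tree nodes.  For
 * example, 10.0.0.0 and 10.128.0.0 compare equal under a /8 (both
 * mask to 0x0a000000) but differ under a /9.
 */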
static int xfrm_policy_addr_delta(const xfrm_address_t *a,
				  const xfrm_address_t *b,
				  u8 prefixlen, u16 family)
{
	u32 ma, mb, mask;
	unsigned int pdw, pbi;
	int delta = 0;

	switch (family) {
	case AF_INET:
		if (prefixlen == 0)
			return 0;
		mask = ~0U << (32 - prefixlen);
		ma = ntohl(a->a4) & mask;
		mb = ntohl(b->a4) & mask;
		/* compare explicitly: a plain subtraction can wrap
		 * around when truncated to int and would yield an
		 * inconsistent rb-tree ordering.
		 */
		if (ma < mb)
			delta = -1;
		else if (ma > mb)
			delta = 1;
		break;
	case AF_INET6:
		pdw = prefixlen >> 5;
		pbi = prefixlen & 0x1f;

		if (pdw) {
			delta = memcmp(a->a6, b->a6, pdw << 2);
			if (delta)
				return delta;
		}
		if (pbi) {
			mask = ~0u << (32 - pbi);

			ma = ntohl(a->a6[pdw]) & mask;
			mb = ntohl(b->a6[pdw]) & mask;
			if (ma < mb)
				delta = -1;
			else if (ma > mb)
				delta = 1;
		}
		break;
	default:
		break;
	}

	return delta;
}

static void xfrm_policy_inexact_list_reinsert(struct net *net,
					      struct xfrm_pol_inexact_node *n,
					      u16 family)
{
	unsigned int matched_s, matched_d;
	struct xfrm_policy *policy, *p;

	matched_s = 0;
	matched_d = 0;

	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		struct hlist_node *newpos = NULL;
		bool matches_s, matches_d;

		if (!policy->bydst_reinsert)
			continue;

		WARN_ON_ONCE(policy->family != family);

		policy->bydst_reinsert = false;
		hlist_for_each_entry(p, &n->hhead, bydst) {
			if (policy->priority > p->priority)
				newpos = &p->bydst;
			else if (policy->priority == p->priority &&
				 policy->pos > p->pos)
				newpos = &p->bydst;
			else
				break;
		}

		if (newpos)
			hlist_add_behind_rcu(&policy->bydst, newpos);
		else
			hlist_add_head_rcu(&policy->bydst, &n->hhead);

		/* paranoia checks follow.
		 * Check that the reinserted policy matches at least
		 * saddr or daddr for current node prefix.
		 *
		 * Matching both is fine, matching saddr in one policy
		 * (but not daddr) and then matching only daddr in another
		 * is a bug.
		 */
		matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
						   &n->addr,
						   n->prefixlen,
						   family) == 0;
		matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
						   &n->addr,
						   n->prefixlen,
						   family) == 0;
		if (matches_s && matches_d)
			continue;

		WARN_ON_ONCE(!matches_s && !matches_d);
		if (matches_s)
			matched_s++;
		if (matches_d)
			matched_d++;
		WARN_ON_ONCE(matched_s && matched_d);
	}
}

static void xfrm_policy_inexact_node_reinsert(struct net *net,
					      struct xfrm_pol_inexact_node *n,
					      struct rb_root *new,
					      u16 family)
{
	struct xfrm_pol_inexact_node *node;
	struct rb_node **p, *parent;

	/* we should not have another subtree here */
	WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
restart:
	parent = NULL;
	p = &new->rb_node;
	while (*p) {
		u8 prefixlen;
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		prefixlen = min(node->prefixlen, n->prefixlen);

		delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
					       prefixlen, family);
		if (delta < 0) {
			p = &parent->rb_left;
		} else if (delta > 0) {
			p = &parent->rb_right;
		} else {
			struct xfrm_policy *tmp;

			hlist_for_each_entry(tmp, &n->hhead, bydst) {
				tmp->bydst_reinsert = true;
				hlist_del_rcu(&tmp->bydst);
			}

			xfrm_policy_inexact_list_reinsert(net, node, family);

			if (node->prefixlen == n->prefixlen) {
				kfree_rcu(n, rcu);
				return;
			}

			rb_erase(*p, new);
			kfree_rcu(n, rcu);
			n = node;
			n->prefixlen = prefixlen;
			goto restart;
		}
	}

	rb_link_node_rcu(&n->node, parent, p);
	rb_insert_color(&n->node, new);
}

/* merge nodes v and n */
static void xfrm_policy_inexact_node_merge(struct net *net,
					   struct xfrm_pol_inexact_node *v,
					   struct xfrm_pol_inexact_node *n,
					   u16 family)
{
	struct xfrm_pol_inexact_node *node;
	struct xfrm_policy *tmp;
	struct rb_node *rnode;

	/* To-be-merged node v has a subtree.
	 *
	 * Dismantle it and insert its nodes to n->root.
	 */
	while ((rnode = rb_first(&v->root)) != NULL) {
		node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
		rb_erase(&node->node, &v->root);
		xfrm_policy_inexact_node_reinsert(net, node, &n->root,
						  family);
	}

	hlist_for_each_entry(tmp, &v->hhead, bydst) {
		tmp->bydst_reinsert = true;
		hlist_del_rcu(&tmp->bydst);
	}

	xfrm_policy_inexact_list_reinsert(net, n, family);
}

static struct xfrm_pol_inexact_node *
xfrm_policy_inexact_insert_node(struct net *net,
				struct rb_root *root,
				xfrm_address_t *addr,
				u16 family, u8 prefixlen, u8 dir)
{
	struct xfrm_pol_inexact_node *cached = NULL;
	struct rb_node **p, *parent = NULL;
	struct xfrm_pol_inexact_node *node;

	p = &root->rb_node;
	while (*p) {
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		delta = xfrm_policy_addr_delta(addr, &node->addr,
					       node->prefixlen,
					       family);
		if (delta == 0 && prefixlen >= node->prefixlen) {
			WARN_ON_ONCE(cached); /* ipsec policies got lost */
			return node;
		}

		if (delta < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;

		if (prefixlen < node->prefixlen) {
			delta = xfrm_policy_addr_delta(addr, &node->addr,
						       prefixlen,
						       family);
			if (delta)
				continue;

			/* This node is a subnet of the new prefix. It needs
			 * to be removed and re-inserted with the smaller
			 * prefix and all nodes that are now also covered
			 * by the reduced prefixlen.
			 */
			rb_erase(&node->node, root);

			if (!cached) {
				xfrm_pol_inexact_node_init(node, addr,
							   prefixlen);
				cached = node;
			} else {
				/* This node also falls within the new
				 * prefixlen. Merge the to-be-reinserted
				 * node and this one.
				 */
				xfrm_policy_inexact_node_merge(net, node,
							       cached, family);
				kfree_rcu(node, rcu);
			}

			/* restart */
			p = &root->rb_node;
			parent = NULL;
		}
	}

	node = cached;
	if (!node) {
		node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
		if (!node)
			return NULL;
	}

	rb_link_node_rcu(&node->node, parent, p);
	rb_insert_color(&node->node, root);

	return node;
}

static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
{
	struct xfrm_pol_inexact_node *node;
	struct rb_node *rn = rb_first(r);

	while (rn) {
		node = rb_entry(rn, struct xfrm_pol_inexact_node, node);

		xfrm_policy_inexact_gc_tree(&node->root, rm);
		rn = rb_next(rn);

		if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
			WARN_ON_ONCE(rm);
			continue;
		}

		rb_erase(&node->node, r);
		kfree_rcu(node, rcu);
	}
}

static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
{
	write_seqcount_begin(&b->count);
	xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
	xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
	write_seqcount_end(&b->count);

	if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
	    !hlist_empty(&b->hhead)) {
		WARN_ON_ONCE(net_exit);
		return;
	}

	if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
				   xfrm_pol_inexact_params) == 0) {
		list_del(&b->inexact_bins);
		kfree_rcu(b, rcu);
	}
}

static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
{
	struct net *net = read_pnet(&b->k.net);

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	__xfrm_policy_inexact_prune_bin(b, false);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}

static void __xfrm_policy_inexact_flush(struct net *net)
{
	struct xfrm_pol_inexact_bin *bin, *t;

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
		__xfrm_policy_inexact_prune_bin(bin, false);
}

static struct hlist_head *
xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
				struct xfrm_policy *policy, u8 dir)
{
	struct xfrm_pol_inexact_node *n;
	struct net *net;

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	if (xfrm_policy_inexact_insert_use_any_list(policy))
		return &bin->hhead;

	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
					       policy->family,
					       policy->selector.prefixlen_d)) {
		write_seqcount_begin(&bin->count);
		n = xfrm_policy_inexact_insert_node(net,
						    &bin->root_s,
						    &policy->selector.saddr,
						    policy->family,
						    policy->selector.prefixlen_s,
						    dir);
		write_seqcount_end(&bin->count);
		if (!n)
			return NULL;

		return &n->hhead;
	}

	/* daddr is fixed */
	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &bin->root_d,
					    &policy->selector.daddr,
					    policy->family,
					    policy->selector.prefixlen_d, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	/* saddr is wildcard */
	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
					       policy->family,
					       policy->selector.prefixlen_s))
		return &n->hhead;

	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &n->root,
					    &policy->selector.saddr,
					    policy->family,
					    policy->selector.prefixlen_s, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	return &n->hhead;
}

static struct xfrm_policy *
xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
{
	struct xfrm_pol_inexact_bin *bin;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct net *net;

	bin = xfrm_policy_inexact_alloc_bin(policy, dir);
	if (!bin)
		return ERR_PTR(-ENOMEM);

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
	if (!chain) {
		__xfrm_policy_inexact_prune_bin(bin, false);
		return ERR_PTR(-ENOMEM);
	}

	delpol = xfrm_policy_insert_list(chain, policy, excl);
	if (delpol && excl) {
		__xfrm_policy_inexact_prune_bin(bin, false);
		return ERR_PTR(-EEXIST);
	}

	chain = &net->xfrm.policy_inexact[dir];
	xfrm_policy_insert_inexact_list(chain, policy);

	if (delpol)
		__xfrm_policy_inexact_prune_bin(bin, false);

	return delpol;
}

static void xfrm_hash_rebuild(struct work_struct *work)
{
	struct net *net = container_of(work, struct net,
				       xfrm.policy_hthresh.work);
	unsigned int hmask;
	struct xfrm_policy *pol;
	struct xfrm_policy *policy;
	struct hlist_head *chain;
	struct hlist_head *odst;
	struct hlist_node *newpos;
	int i;
	int dir;
	unsigned seq;
	u8 lbits4, rbits4, lbits6, rbits6;

	mutex_lock(&hash_resize_mutex);

	/* read selector prefixlen thresholds */
	do {
		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);

		lbits4 = net->xfrm.policy_hthresh.lbits4;
		rbits4 = net->xfrm.policy_hthresh.rbits4;
		lbits6 = net->xfrm.policy_hthresh.lbits6;
		rbits6 = net->xfrm.policy_hthresh.rbits6;
	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&xfrm_policy_hash_generation);

	/* make sure that we can insert the inexact policies again before
	 * we start with destructive action.
	 */
	list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
		struct xfrm_pol_inexact_bin *bin;
		u8 dbits, sbits;

		dir = xfrm_policy_id2dir(policy->index);
		if (policy->walk.dead || dir >= XFRM_POLICY_MAX)
			continue;

		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			if (policy->family == AF_INET) {
				dbits = rbits4;
				sbits = lbits4;
			} else {
				dbits = rbits6;
				sbits = lbits6;
			}
		} else {
			if (policy->family == AF_INET) {
				dbits = lbits4;
				sbits = rbits4;
			} else {
				dbits = lbits6;
				sbits = rbits6;
			}
		}

		if (policy->selector.prefixlen_d < dbits ||
		    policy->selector.prefixlen_s < sbits)
			continue;

		bin = xfrm_policy_inexact_alloc_bin(policy, dir);
		if (!bin)
			goto out_unlock;

		if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
			goto out_unlock;
	}

	/* reset the bydst and inexact table in all directions */
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct hlist_node *n;

		hlist_for_each_entry_safe(policy, n,
					  &net->xfrm.policy_inexact[dir],
					  bydst_inexact_list) {
			hlist_del_rcu(&policy->bydst);
			hlist_del_init(&policy->bydst_inexact_list);
		}

		hmask = net->xfrm.policy_bydst[dir].hmask;
		odst = net->xfrm.policy_bydst[dir].table;
		for (i = hmask; i >= 0; i--) {
			hlist_for_each_entry_safe(policy, n, odst + i, bydst)
				hlist_del_rcu(&policy->bydst);
		}
		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			/* dir out => dst = remote, src = local */
			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
		} else {
			/* dir in/fwd => dst = local, src = remote */
			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
		}
	}

	/* re-insert all policies by order of creation */
	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		if (policy->walk.dead)
			continue;
		dir = xfrm_policy_id2dir(policy->index);
		if (dir >= XFRM_POLICY_MAX) {
			/* skip socket policies */
			continue;
		}
		newpos = NULL;
		chain = policy_hash_bysel(net, &policy->selector,
					  policy->family, dir);

		if (!chain) {
			void *p = xfrm_policy_inexact_insert(policy, dir, 0);

			WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
			continue;
		}

		hlist_for_each_entry(pol, chain, bydst) {
			if (policy->priority >= pol->priority)
				newpos = &pol->bydst;
			else
				break;
		}
		if (newpos)
			hlist_add_behind_rcu(&policy->bydst, newpos);
		else
			hlist_add_head_rcu(&policy->bydst, chain);
	}

out_unlock:
	__xfrm_policy_inexact_flush(net);
	write_seqcount_end(&xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	mutex_unlock(&hash_resize_mutex);
}

void xfrm_policy_hash_rebuild(struct net *net)
{
	schedule_work(&net->xfrm.policy_hthresh.work);
}
EXPORT_SYMBOL(xfrm_policy_hash_rebuild);

/* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute unpredictability of ordering of rules. This will not pass. */
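/* The low 3 bits of an index encode the policy direction and the
 * generator advances in steps of 8, so xfrm_policy_id2dir() can
 * recover dir from the index (index & 7).
 */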
static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
{
	static u32 idx_generator;

	for (;;) {
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		if (!index) {
			idx = (idx_generator | dir);
			idx_generator += 8;
		} else {
			idx = index;
			index = 0;
		}

		if (idx == 0)
			idx = 8;
		list = net->xfrm.policy_byidx + idx_hash(net, idx);
		found = 0;
		hlist_for_each_entry(p, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}

static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}

static void xfrm_policy_requeue(struct xfrm_policy *old,
				struct xfrm_policy *new)
{
	struct xfrm_policy_queue *pq = &old->polq;
	struct sk_buff_head list;

	if (skb_queue_empty(&pq->hold_queue))
		return;

	__skb_queue_head_init(&list);

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice_init(&pq->hold_queue, &list);
	if (del_timer(&pq->hold_timer))
		xfrm_pol_put(old);
	spin_unlock_bh(&pq->hold_queue.lock);

	pq = &new->polq;

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice(&list, &pq->hold_queue);
	pq->timeout = XFRM_QUEUE_TMO_MIN;
	if (!mod_timer(&pq->hold_timer, jiffies))
		xfrm_pol_hold(new);
	spin_unlock_bh(&pq->hold_queue.lock);
}

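/* Two policies count as the same mark-wise if value and mask are
 * identical, or if the masked values coincide and the priorities
 * match.
 */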
static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
				   struct xfrm_policy *pol)
{
	u32 mark = policy->mark.v & policy->mark.m;

	if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
		return true;

	if ((mark & pol->mark.m) == pol->mark.v &&
	    policy->priority == pol->priority)
		return true;

	return false;
}

static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
{
	const struct xfrm_pol_inexact_key *k = data;
	u32 a = k->type << 24 | k->dir << 16 | k->family;

	return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
			    seed);
}

static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
{
	const struct xfrm_pol_inexact_bin *b = data;

	return xfrm_pol_bin_key(&b->k, 0, seed);
}

static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
			    const void *ptr)
{
	const struct xfrm_pol_inexact_key *key = arg->key;
	const struct xfrm_pol_inexact_bin *b = ptr;
	int ret;

	if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
		return -1;

	ret = b->k.dir ^ key->dir;
	if (ret)
		return ret;

	ret = b->k.type ^ key->type;
	if (ret)
		return ret;

	ret = b->k.family ^ key->family;
	if (ret)
		return ret;

	return b->k.if_id ^ key->if_id;
}

static const struct rhashtable_params xfrm_pol_inexact_params = {
	.head_offset		= offsetof(struct xfrm_pol_inexact_bin, head),
	.hashfn			= xfrm_pol_bin_key,
	.obj_hashfn		= xfrm_pol_bin_obj,
	.obj_cmpfn		= xfrm_pol_bin_cmp,
	.automatic_shrinking	= true,
};

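/* Insert @policy into the per-direction inexact chain, ordered by
 * priority.  Afterwards each entry's pol->pos caches its position in
 * the chain; lookups use it to break ties between matches of equal
 * priority.
 */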
static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy)
{
	struct xfrm_policy *pol, *delpol = NULL;
	struct hlist_node *newpos = NULL;
	int i = 0;

	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		if (pol->type == policy->type &&
		    pol->if_id == policy->if_id &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(policy, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst_inexact_list;
			continue;
		}
		if (delpol)
			break;
	}

	if (newpos)
		hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
	else
		hlist_add_head_rcu(&policy->bydst_inexact_list, chain);

	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		pol->pos = i;
		i++;
	}
}

static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
						   struct xfrm_policy *policy,
						   bool excl)
{
	struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;

	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == policy->type &&
		    pol->if_id == policy->if_id &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(policy, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl)
				return ERR_PTR(-EEXIST);
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = pol;
			continue;
		}
		if (delpol)
			break;
	}

	if (newpos)
		hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
	else
		hlist_add_head_rcu(&policy->bydst, chain);

	return delpol;
}

int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct net *net = xp_net(policy);
	struct xfrm_policy *delpol;
	struct hlist_head *chain;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
	if (chain)
		delpol = xfrm_policy_insert_list(chain, policy, excl);
	else
		delpol = xfrm_policy_inexact_insert(policy, dir, excl);

	if (IS_ERR(delpol)) {
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
		return PTR_ERR(delpol);
	}

	__xfrm_policy_link(policy, dir);

	/* After previous checking, family can either be AF_INET or AF_INET6 */
	if (policy->family == AF_INET)
		rt_genid_bump_ipv4(net);
	else
		rt_genid_bump_ipv6(net);

	if (delpol) {
		xfrm_policy_requeue(delpol, policy);
		__xfrm_policy_unlink(delpol, dir);
	}
	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
	policy->curlft.add_time = ktime_get_real_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

static struct xfrm_policy *
__xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id,
			u8 type, int dir,
			struct xfrm_selector *sel,
			struct xfrm_sec_ctx *ctx)
{
	struct xfrm_policy *pol;

	if (!chain)
		return NULL;

	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == type &&
		    pol->if_id == if_id &&
		    (mark & pol->mark.m) == pol->mark.v &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security))
			return pol;
	}

	return NULL;
}

struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
					  u8 type, int dir,
					  struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete,
					  int *err)
{
	struct xfrm_pol_inexact_bin *bin = NULL;
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;

	*err = 0;
	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, sel, sel->family, dir);
	if (!chain) {
		struct xfrm_pol_inexact_candidates cand;
		int i;

		bin = xfrm_policy_inexact_lookup(net, type,
						 sel->family, dir, if_id);
		if (!bin) {
			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
			return NULL;
		}

		if (!xfrm_policy_find_inexact_candidates(&cand, bin,
							 &sel->saddr,
							 &sel->daddr)) {
			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
			return NULL;
		}

		pol = NULL;
		for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
			struct xfrm_policy *tmp;

			tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
						      if_id, type, dir,
						      sel, ctx);
			if (!tmp)
				continue;

			if (!pol || tmp->pos < pol->pos)
				pol = tmp;
		}
	} else {
		pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
					      sel, ctx);
	}

	if (pol) {
		xfrm_pol_hold(pol);
		if (delete) {
			*err = security_xfrm_policy_delete(pol->security);
			if (*err) {
				spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
				return pol;
			}
			__xfrm_policy_unlink(pol, dir);
		}
		ret = pol;
	}
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	if (bin && delete)
		xfrm_policy_inexact_prune_bin(bin);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id,
				     u8 type, int dir, u32 id, int delete,
				     int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;

	*err = -ENOENT;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = net->xfrm.policy_byidx + idx_hash(net, id);
	ret = NULL;
	hlist_for_each_entry(pol, chain, byidx) {
		if (pol->type == type && pol->index == id &&
		    pol->if_id == if_id &&
		    (mark & pol->mark.m) == pol->mark.v) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
	struct xfrm_policy *pol;
	int err = 0;

	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
		if (pol->walk.dead ||
		    xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
		    pol->type != type)
			continue;

		err = security_xfrm_policy_delete(pol->security);
		if (err) {
			xfrm_audit_policy_delete(pol, 0, task_valid);
			return err;
		}
	}
	return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
	return 0;
}
#endif

int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
{
	int dir, err = 0, cnt = 0;
	struct xfrm_policy *pol;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
	if (err)
		goto out;

again:
	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
		dir = xfrm_policy_id2dir(pol->index);
		if (pol->walk.dead ||
		    dir >= XFRM_POLICY_MAX ||
		    pol->type != type)
			continue;

		__xfrm_policy_unlink(pol, dir);
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
		cnt++;
		xfrm_audit_policy_delete(pol, 1, task_valid);
		xfrm_policy_kill(pol);
		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
		goto again;
	}
	if (cnt)
		__xfrm_policy_inexact_flush(net);
	else
		err = -ESRCH;
out:
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);

int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
		     int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol;
	struct xfrm_policy_walk_entry *x;
	int error = 0;

	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
	    walk->type != XFRM_POLICY_TYPE_ANY)
		return -EINVAL;

	if (list_empty(&walk->walk.all) && walk->seq != 0)
		return 0;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	if (list_empty(&walk->walk.all))
		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
	else
		x = list_first_entry(&walk->walk.all,
				     struct xfrm_policy_walk_entry, all);

	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
		if (x->dead)
			continue;
		pol = container_of(x, struct xfrm_policy, walk);
		if (walk->type != XFRM_POLICY_TYPE_ANY &&
		    walk->type != pol->type)
			continue;
		error = func(pol, xfrm_policy_id2dir(pol->index),
			     walk->seq, data);
		if (error) {
			list_move_tail(&walk->walk.all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		error = -ENOENT;
		goto out;
	}
	list_del_init(&walk->walk.all);
out:
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
{
	INIT_LIST_HEAD(&walk->walk.all);
	walk->walk.dead = 1;
	walk->type = type;
	walk->seq = 0;
}
EXPORT_SYMBOL(xfrm_policy_walk_init);

void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
{
	if (list_empty(&walk->walk.all))
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
	list_del(&walk->walk.all);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);

/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
static int xfrm_policy_match(const struct xfrm_policy *pol,
			     const struct flowi *fl,
			     u8 type, u16 family, int dir, u32 if_id)
{
	const struct xfrm_selector *sel = &pol->selector;
	int ret = -ESRCH;
	bool match;

	if (pol->family != family ||
	    pol->if_id != if_id ||
	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
						  dir);
	return ret;
}

static struct xfrm_pol_inexact_node *
xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
				seqcount_t *count,
				const xfrm_address_t *addr, u16 family)
{
	const struct rb_node *parent;
	int seq;

again:
	seq = read_seqcount_begin(count);

	parent = rcu_dereference_raw(r->rb_node);
	while (parent) {
		struct xfrm_pol_inexact_node *node;
		int delta;

		node = rb_entry(parent, struct xfrm_pol_inexact_node, node);

		delta = xfrm_policy_addr_delta(addr, &node->addr,
					       node->prefixlen, family);
		if (delta < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			continue;
		} else if (delta > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
			continue;
		}

		return node;
	}

	if (read_seqcount_retry(count, seq))
		goto again;

	return NULL;
}

1942 static bool
1943 xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
1944 				    struct xfrm_pol_inexact_bin *b,
1945 				    const xfrm_address_t *saddr,
1946 				    const xfrm_address_t *daddr)
1947 {
1948 	struct xfrm_pol_inexact_node *n;
1949 	u16 family;
1950 
1951 	if (!b)
1952 		return false;
1953 
1954 	family = b->k.family;
1955 	memset(cand, 0, sizeof(*cand));
1956 	cand->res[XFRM_POL_CAND_ANY] = &b->hhead;
1957 
1958 	n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
1959 					    family);
1960 	if (n) {
1961 		cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
1962 		n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
1963 						    family);
1964 		if (n)
1965 			cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
1966 	}
1967 
1968 	n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
1969 					    family);
1970 	if (n)
1971 		cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;
1972 
1973 	return true;
1974 }
1975 
1976 static struct xfrm_pol_inexact_bin *
1977 xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
1978 			       u8 dir, u32 if_id)
1979 {
1980 	struct xfrm_pol_inexact_key k = {
1981 		.family = family,
1982 		.type = type,
1983 		.dir = dir,
1984 		.if_id = if_id,
1985 	};
1986 
1987 	write_pnet(&k.net, net);
1988 
1989 	return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
1990 				 xfrm_pol_inexact_params);
1991 }
1992 
1993 static struct xfrm_pol_inexact_bin *
1994 xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
1995 			   u8 dir, u32 if_id)
1996 {
1997 	struct xfrm_pol_inexact_bin *bin;
1998 
1999 	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
2000 
2001 	rcu_read_lock();
2002 	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2003 	rcu_read_unlock();
2004 
2005 	return bin;
2006 }
2007 
2008 static struct xfrm_policy *
2009 __xfrm_policy_eval_candidates(struct hlist_head *chain,
2010 			      struct xfrm_policy *prefer,
2011 			      const struct flowi *fl,
2012 			      u8 type, u16 family, int dir, u32 if_id)
2013 {
2014 	u32 priority = prefer ? prefer->priority : ~0u;
2015 	struct xfrm_policy *pol;
2016 
2017 	if (!chain)
2018 		return NULL;
2019 
2020 	hlist_for_each_entry_rcu(pol, chain, bydst) {
2021 		int err;
2022 
2023 		if (pol->priority > priority)
2024 			break;
2025 
2026 		err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
2027 		if (err) {
2028 			if (err != -ESRCH)
2029 				return ERR_PTR(err);
2030 
2031 			continue;
2032 		}
2033 
2034 		if (prefer) {
2035 			/* matches.  Is it older than *prefer? */
2036 			if (pol->priority == priority &&
2037 			    prefer->pos < pol->pos)
2038 				return prefer;
2039 		}
2040 
2041 		return pol;
2042 	}
2043 
2044 	return NULL;
2045 }
2046 
2047 static struct xfrm_policy *
2048 xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
2049 			    struct xfrm_policy *prefer,
2050 			    const struct flowi *fl,
2051 			    u8 type, u16 family, int dir, u32 if_id)
2052 {
2053 	struct xfrm_policy *tmp;
2054 	int i;
2055 
2056 	for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
2057 		tmp = __xfrm_policy_eval_candidates(cand->res[i],
2058 						    prefer,
2059 						    fl, type, family, dir,
2060 						    if_id);
2061 		if (!tmp)
2062 			continue;
2063 
2064 		if (IS_ERR(tmp))
2065 			return tmp;
2066 		prefer = tmp;
2067 	}
2068 
2069 	return prefer;
2070 }
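
/*
 * Example (editorial walk-through, not part of the original source): how the
 * candidate evaluation above picks a winner.  Each candidate list is sorted
 * by ->priority, so __xfrm_policy_eval_candidates() can stop at the first
 * entry worse than the current best.  Ties on ->priority are broken by
 * ->pos: the entry with the smaller ->pos is kept.  Hypothetical run over
 * the four candidate lists:
 *
 *	prefer = NULL
 *	ANY   list: A (prio 10)            -> prefer = A
 *	DADDR list: B (prio 5)             -> 5 < 10, prefer = B
 *	BOTH  list: C (prio 5, pos > B's)  -> tie, B has smaller ->pos, keep B
 *	SADDR list: (empty)                -> final answer: B
 */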
2071 
2072 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
2073 						     const struct flowi *fl,
2074 						     u16 family, u8 dir,
2075 						     u32 if_id)
2076 {
2077 	struct xfrm_pol_inexact_candidates cand;
2078 	const xfrm_address_t *daddr, *saddr;
2079 	struct xfrm_pol_inexact_bin *bin;
2080 	struct xfrm_policy *pol, *ret;
2081 	struct hlist_head *chain;
2082 	unsigned int sequence;
2083 	int err;
2084 
2085 	daddr = xfrm_flowi_daddr(fl, family);
2086 	saddr = xfrm_flowi_saddr(fl, family);
2087 	if (unlikely(!daddr || !saddr))
2088 		return NULL;
2089 
2090 	rcu_read_lock();
2091  retry:
2092 	do {
2093 		sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
2094 		chain = policy_hash_direct(net, daddr, saddr, family, dir);
2095 	} while (read_seqcount_retry(&xfrm_policy_hash_generation, sequence));
2096 
2097 	ret = NULL;
2098 	hlist_for_each_entry_rcu(pol, chain, bydst) {
2099 		err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
2100 		if (err) {
2101 			if (err == -ESRCH)
2102 				continue;
2103 			else {
2104 				ret = ERR_PTR(err);
2105 				goto fail;
2106 			}
2107 		} else {
2108 			ret = pol;
2109 			break;
2110 		}
2111 	}
2112 	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2113 	if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
2114 							 daddr))
2115 		goto skip_inexact;
2116 
2117 	pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
2118 					  family, dir, if_id);
2119 	if (pol) {
2120 		ret = pol;
2121 		if (IS_ERR(pol))
2122 			goto fail;
2123 	}
2124 
2125 skip_inexact:
2126 	if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence))
2127 		goto retry;
2128 
2129 	if (ret && !xfrm_pol_hold_rcu(ret))
2130 		goto retry;
2131 fail:
2132 	rcu_read_unlock();
2133 
2134 	return ret;
2135 }
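
/*
 * Example (editorial sketch, not part of the original source): the lockless
 * read pattern used by xfrm_policy_lookup_bytype() above.  Readers sample
 * xfrm_policy_hash_generation, perform the lookup under RCU, and redo the
 * whole lookup if a concurrent rehash bumped the sequence count meanwhile:
 *
 *	unsigned int seq;
 *
 *	rcu_read_lock();
 *	do {
 *		seq = read_seqcount_begin(&xfrm_policy_hash_generation);
 *		... look up in the hash table and inexact trees ...
 *	} while (read_seqcount_retry(&xfrm_policy_hash_generation, seq));
 *	rcu_read_unlock();
 *
 * The reference on the result is taken with xfrm_pol_hold_rcu(), which can
 * fail if the policy is concurrently being freed; that failure also
 * restarts the lookup.
 */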
2136 
2137 static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
2138 					      const struct flowi *fl,
2139 					      u16 family, u8 dir, u32 if_id)
2140 {
2141 #ifdef CONFIG_XFRM_SUB_POLICY
2142 	struct xfrm_policy *pol;
2143 
2144 	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
2145 					dir, if_id);
2146 	if (pol != NULL)
2147 		return pol;
2148 #endif
2149 	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
2150 					 dir, if_id);
2151 }
2152 
2153 static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
2154 						 const struct flowi *fl,
2155 						 u16 family, u32 if_id)
2156 {
2157 	struct xfrm_policy *pol;
2158 
2159 	rcu_read_lock();
2160  again:
2161 	pol = rcu_dereference(sk->sk_policy[dir]);
2162 	if (pol != NULL) {
2163 		bool match;
2164 		int err = 0;
2165 
2166 		if (pol->family != family) {
2167 			pol = NULL;
2168 			goto out;
2169 		}
2170 
2171 		match = xfrm_selector_match(&pol->selector, fl, family);
2172 		if (match) {
2173 			if ((sk->sk_mark & pol->mark.m) != pol->mark.v ||
2174 			    pol->if_id != if_id) {
2175 				pol = NULL;
2176 				goto out;
2177 			}
2178 			err = security_xfrm_policy_lookup(pol->security,
2179 						      fl->flowi_secid,
2180 						      dir);
2181 			if (!err) {
2182 				if (!xfrm_pol_hold_rcu(pol))
2183 					goto again;
2184 			} else if (err == -ESRCH) {
2185 				pol = NULL;
2186 			} else {
2187 				pol = ERR_PTR(err);
2188 			}
2189 		} else
2190 			pol = NULL;
2191 	}
2192 out:
2193 	rcu_read_unlock();
2194 	return pol;
2195 }
2196 
2197 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
2198 {
2199 	struct net *net = xp_net(pol);
2200 
2201 	list_add(&pol->walk.all, &net->xfrm.policy_all);
2202 	net->xfrm.policy_count[dir]++;
2203 	xfrm_pol_hold(pol);
2204 }
2205 
2206 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
2207 						int dir)
2208 {
2209 	struct net *net = xp_net(pol);
2210 
2211 	if (list_empty(&pol->walk.all))
2212 		return NULL;
2213 
2214 	/* Socket policies are not hashed. */
2215 	if (!hlist_unhashed(&pol->bydst)) {
2216 		hlist_del_rcu(&pol->bydst);
2217 		hlist_del_init(&pol->bydst_inexact_list);
2218 		hlist_del(&pol->byidx);
2219 	}
2220 
2221 	list_del_init(&pol->walk.all);
2222 	net->xfrm.policy_count[dir]--;
2223 
2224 	return pol;
2225 }
2226 
2227 static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
2228 {
2229 	__xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
2230 }
2231 
2232 static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
2233 {
2234 	__xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
2235 }
2236 
2237 int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
2238 {
2239 	struct net *net = xp_net(pol);
2240 
2241 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2242 	pol = __xfrm_policy_unlink(pol, dir);
2243 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2244 	if (pol) {
2245 		xfrm_policy_kill(pol);
2246 		return 0;
2247 	}
2248 	return -ENOENT;
2249 }
2250 EXPORT_SYMBOL(xfrm_policy_delete);
2251 
2252 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
2253 {
2254 	struct net *net = sock_net(sk);
2255 	struct xfrm_policy *old_pol;
2256 
2257 #ifdef CONFIG_XFRM_SUB_POLICY
2258 	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
2259 		return -EINVAL;
2260 #endif
2261 
2262 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2263 	old_pol = rcu_dereference_protected(sk->sk_policy[dir],
2264 				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
2265 	if (pol) {
2266 		pol->curlft.add_time = ktime_get_real_seconds();
2267 		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
2268 		xfrm_sk_policy_link(pol, dir);
2269 	}
2270 	rcu_assign_pointer(sk->sk_policy[dir], pol);
2271 	if (old_pol) {
2272 		if (pol)
2273 			xfrm_policy_requeue(old_pol, pol);
2274 
2275 		/* Unlinking always succeeds. This is the only function
2276 		 * allowed to delete or replace a socket policy.
2277 		 */
2278 		xfrm_sk_policy_unlink(old_pol, dir);
2279 	}
2280 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2281 
2282 	if (old_pol)
2283 		xfrm_policy_kill(old_pol);
2284 
2285 	return 0;
2286 }
2287 
2288 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
2289 {
2290 	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
2291 	struct net *net = xp_net(old);
2292 
2293 	if (newp) {
2294 		newp->selector = old->selector;
2295 		if (security_xfrm_policy_clone(old->security,
2296 					       &newp->security)) {
2297 			kfree(newp);
2298 			return NULL;  /* ENOMEM */
2299 		}
2300 		newp->lft = old->lft;
2301 		newp->curlft = old->curlft;
2302 		newp->mark = old->mark;
2303 		newp->if_id = old->if_id;
2304 		newp->action = old->action;
2305 		newp->flags = old->flags;
2306 		newp->xfrm_nr = old->xfrm_nr;
2307 		newp->index = old->index;
2308 		newp->type = old->type;
2309 		newp->family = old->family;
2310 		memcpy(newp->xfrm_vec, old->xfrm_vec,
2311 		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
2312 		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2313 		xfrm_sk_policy_link(newp, dir);
2314 		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2315 		xfrm_pol_put(newp);
2316 	}
2317 	return newp;
2318 }
2319 
2320 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
2321 {
2322 	const struct xfrm_policy *p;
2323 	struct xfrm_policy *np;
2324 	int i, ret = 0;
2325 
2326 	rcu_read_lock();
2327 	for (i = 0; i < 2; i++) {
2328 		p = rcu_dereference(osk->sk_policy[i]);
2329 		if (p) {
2330 			np = clone_policy(p, i);
2331 			if (unlikely(!np)) {
2332 				ret = -ENOMEM;
2333 				break;
2334 			}
2335 			rcu_assign_pointer(sk->sk_policy[i], np);
2336 		}
2337 	}
2338 	rcu_read_unlock();
2339 	return ret;
2340 }
2341 
2342 static int
2343 xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
2344 	       xfrm_address_t *remote, unsigned short family, u32 mark)
2345 {
2346 	int err;
2347 	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2348 
2349 	if (unlikely(afinfo == NULL))
2350 		return -EINVAL;
2351 	err = afinfo->get_saddr(net, oif, local, remote, mark);
2352 	rcu_read_unlock();
2353 	return err;
2354 }
2355 
2356 /* Resolve list of templates for the flow, given policy. */
2357 
2358 static int
2359 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
2360 		      struct xfrm_state **xfrm, unsigned short family)
2361 {
2362 	struct net *net = xp_net(policy);
2363 	int nx;
2364 	int i, error;
2365 	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
2366 	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
2367 	xfrm_address_t tmp;
2368 
2369 	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
2370 		struct xfrm_state *x;
2371 		xfrm_address_t *remote = daddr;
2372 		xfrm_address_t *local  = saddr;
2373 		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
2374 
2375 		if (tmpl->mode == XFRM_MODE_TUNNEL ||
2376 		    tmpl->mode == XFRM_MODE_BEET) {
2377 			remote = &tmpl->id.daddr;
2378 			local = &tmpl->saddr;
2379 			if (xfrm_addr_any(local, tmpl->encap_family)) {
2380 				error = xfrm_get_saddr(net, fl->flowi_oif,
2381 						       &tmp, remote,
2382 						       tmpl->encap_family, 0);
2383 				if (error)
2384 					goto fail;
2385 				local = &tmp;
2386 			}
2387 		}
2388 
2389 		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
2390 				    family, policy->if_id);
2391 
2392 		if (x && x->km.state == XFRM_STATE_VALID) {
2393 			xfrm[nx++] = x;
2394 			daddr = remote;
2395 			saddr = local;
2396 			continue;
2397 		}
2398 		if (x) {
2399 			error = (x->km.state == XFRM_STATE_ERROR ?
2400 				 -EINVAL : -EAGAIN);
2401 			xfrm_state_put(x);
2402 		} else if (error == -ESRCH) {
2403 			error = -EAGAIN;
2404 		}
2405 
2406 		if (!tmpl->optional)
2407 			goto fail;
2408 	}
2409 	return nx;
2410 
2411 fail:
2412 	for (nx--; nx >= 0; nx--)
2413 		xfrm_state_put(xfrm[nx]);
2414 	return error;
2415 }
2416 
2417 static int
2418 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
2419 		  struct xfrm_state **xfrm, unsigned short family)
2420 {
2421 	struct xfrm_state *tp[XFRM_MAX_DEPTH];
2422 	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
2423 	int cnx = 0;
2424 	int error;
2425 	int ret;
2426 	int i;
2427 
2428 	for (i = 0; i < npols; i++) {
2429 		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
2430 			error = -ENOBUFS;
2431 			goto fail;
2432 		}
2433 
2434 		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
2435 		if (ret < 0) {
2436 			error = ret;
2437 			goto fail;
2438 		} else
2439 			cnx += ret;
2440 	}
2441 
2442 	/* found states are sorted for outbound processing */
2443 	if (npols > 1)
2444 		xfrm_state_sort(xfrm, tpp, cnx, family);
2445 
2446 	return cnx;
2447 
2448  fail:
2449 	for (cnx--; cnx >= 0; cnx--)
2450 		xfrm_state_put(tpp[cnx]);
2451 	return error;
2452 
2453 }
2454 
2455 static int xfrm_get_tos(const struct flowi *fl, int family)
2456 {
2457 	if (family == AF_INET)
2458 		return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos;
2459 
2460 	return 0;
2461 }
2462 
2463 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
2464 {
2465 	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2466 	struct dst_ops *dst_ops;
2467 	struct xfrm_dst *xdst;
2468 
2469 	if (!afinfo)
2470 		return ERR_PTR(-EINVAL);
2471 
2472 	switch (family) {
2473 	case AF_INET:
2474 		dst_ops = &net->xfrm.xfrm4_dst_ops;
2475 		break;
2476 #if IS_ENABLED(CONFIG_IPV6)
2477 	case AF_INET6:
2478 		dst_ops = &net->xfrm.xfrm6_dst_ops;
2479 		break;
2480 #endif
2481 	default:
2482 		BUG();
2483 	}
2484 	xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
2485 
2486 	if (likely(xdst)) {
2487 		struct dst_entry *dst = &xdst->u.dst;
2488 
2489 		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
2490 	} else
2491 		xdst = ERR_PTR(-ENOBUFS);
2492 
2493 	rcu_read_unlock();
2494 
2495 	return xdst;
2496 }
2497 
2498 static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
2499 			   int nfheader_len)
2500 {
2501 	if (dst->ops->family == AF_INET6) {
2502 		struct rt6_info *rt = (struct rt6_info *)dst;
2503 		path->path_cookie = rt6_get_cookie(rt);
2504 		path->u.rt6.rt6i_nfheader_len = nfheader_len;
2505 	}
2506 }
2507 
2508 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
2509 				const struct flowi *fl)
2510 {
2511 	const struct xfrm_policy_afinfo *afinfo =
2512 		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
2513 	int err;
2514 
2515 	if (!afinfo)
2516 		return -EINVAL;
2517 
2518 	err = afinfo->fill_dst(xdst, dev, fl);
2519 
2520 	rcu_read_unlock();
2521 
2522 	return err;
2523 }
2524 
2525 
2526 /* Allocate a chain of dst_entry's, attach known xfrm's, and calculate
2527  * all the metrics... In short, bundle a bundle.
2528  */
2529 
2530 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
2531 					    struct xfrm_state **xfrm,
2532 					    struct xfrm_dst **bundle,
2533 					    int nx,
2534 					    const struct flowi *fl,
2535 					    struct dst_entry *dst)
2536 {
2537 	const struct xfrm_state_afinfo *afinfo;
2538 	const struct xfrm_mode *inner_mode;
2539 	struct net *net = xp_net(policy);
2540 	unsigned long now = jiffies;
2541 	struct net_device *dev;
2542 	struct xfrm_dst *xdst_prev = NULL;
2543 	struct xfrm_dst *xdst0 = NULL;
2544 	int i = 0;
2545 	int err;
2546 	int header_len = 0;
2547 	int nfheader_len = 0;
2548 	int trailer_len = 0;
2549 	int tos;
2550 	int family = policy->selector.family;
2551 	xfrm_address_t saddr, daddr;
2552 
2553 	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
2554 
2555 	tos = xfrm_get_tos(fl, family);
2556 
2557 	dst_hold(dst);
2558 
2559 	for (; i < nx; i++) {
2560 		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
2561 		struct dst_entry *dst1 = &xdst->u.dst;
2562 
2563 		err = PTR_ERR(xdst);
2564 		if (IS_ERR(xdst)) {
2565 			dst_release(dst);
2566 			goto put_states;
2567 		}
2568 
2569 		bundle[i] = xdst;
2570 		if (!xdst_prev)
2571 			xdst0 = xdst;
2572 		else
2573 			/* Ref count is taken during xfrm_alloc_dst();
2574 			 * no need to do dst_clone() on dst1.
2575 			 */
2576 			xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
2577 
2578 		if (xfrm[i]->sel.family == AF_UNSPEC) {
2579 			inner_mode = xfrm_ip2inner_mode(xfrm[i],
2580 							xfrm_af2proto(family));
2581 			if (!inner_mode) {
2582 				err = -EAFNOSUPPORT;
2583 				dst_release(dst);
2584 				goto put_states;
2585 			}
2586 		} else
2587 			inner_mode = &xfrm[i]->inner_mode;
2588 
2589 		xdst->route = dst;
2590 		dst_copy_metrics(dst1, dst);
2591 
2592 		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
2593 			__u32 mark = 0;
2594 
2595 			if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
2596 				mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
2597 
2598 			family = xfrm[i]->props.family;
2599 			dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif,
2600 					      &saddr, &daddr, family, mark);
2601 			err = PTR_ERR(dst);
2602 			if (IS_ERR(dst))
2603 				goto put_states;
2604 		} else
2605 			dst_hold(dst);
2606 
2607 		dst1->xfrm = xfrm[i];
2608 		xdst->xfrm_genid = xfrm[i]->genid;
2609 
2610 		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2611 		dst1->flags |= DST_HOST;
2612 		dst1->lastuse = now;
2613 
2614 		dst1->input = dst_discard;
2615 
2616 		rcu_read_lock();
2617 		afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
2618 		if (likely(afinfo))
2619 			dst1->output = afinfo->output;
2620 		else
2621 			dst1->output = dst_discard_out;
2622 		rcu_read_unlock();
2623 
2624 		xdst_prev = xdst;
2625 
2626 		header_len += xfrm[i]->props.header_len;
2627 		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
2628 			nfheader_len += xfrm[i]->props.header_len;
2629 		trailer_len += xfrm[i]->props.trailer_len;
2630 	}
2631 
2632 	xfrm_dst_set_child(xdst_prev, dst);
2633 	xdst0->path = dst;
2634 
2635 	err = -ENODEV;
2636 	dev = dst->dev;
2637 	if (!dev)
2638 		goto free_dst;
2639 
2640 	xfrm_init_path(xdst0, dst, nfheader_len);
2641 	xfrm_init_pmtu(bundle, nx);
2642 
2643 	for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
2644 	     xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
2645 		err = xfrm_fill_dst(xdst_prev, dev, fl);
2646 		if (err)
2647 			goto free_dst;
2648 
2649 		xdst_prev->u.dst.header_len = header_len;
2650 		xdst_prev->u.dst.trailer_len = trailer_len;
2651 		header_len -= xdst_prev->u.dst.xfrm->props.header_len;
2652 		trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
2653 	}
2654 
2655 	return &xdst0->u.dst;
2656 
2657 put_states:
2658 	for (; i < nx; i++)
2659 		xfrm_state_put(xfrm[i]);
2660 free_dst:
2661 	if (xdst0)
2662 		dst_release_immediate(&xdst0->u.dst);
2663 
2664 	return ERR_PTR(err);
2665 }
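
/*
 * Example (editorial sketch, not part of the original source): the shape of
 * the bundle built above.  Each hop in the chain is an xfrm_dst whose
 * ->xfrm points at one state; hops are linked via xfrm_dst_child() and the
 * chain terminates at the plain route (path) dst, whose ->xfrm is NULL.
 * That is why code elsewhere in this file walks bundles like this:
 *
 *	struct dst_entry *d;
 *
 *	for (d = bundle_head; d->xfrm; d = xfrm_dst_child(d)) {
 *		struct xfrm_dst *xdst = (struct xfrm_dst *)d;
 *		... inspect xdst and d->xfrm ...
 *	}
 *
 * "bundle_head" is a hypothetical name for the dst_entry returned by
 * xfrm_bundle_create().
 */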
2666 
2667 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
2668 				struct xfrm_policy **pols,
2669 				int *num_pols, int *num_xfrms)
2670 {
2671 	int i;
2672 
2673 	if (*num_pols == 0 || !pols[0]) {
2674 		*num_pols = 0;
2675 		*num_xfrms = 0;
2676 		return 0;
2677 	}
2678 	if (IS_ERR(pols[0]))
2679 		return PTR_ERR(pols[0]);
2680 
2681 	*num_xfrms = pols[0]->xfrm_nr;
2682 
2683 #ifdef CONFIG_XFRM_SUB_POLICY
2684 	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
2685 	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2686 		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
2687 						    XFRM_POLICY_TYPE_MAIN,
2688 						    fl, family,
2689 						    XFRM_POLICY_OUT,
2690 						    pols[0]->if_id);
2691 		if (pols[1]) {
2692 			if (IS_ERR(pols[1])) {
2693 				xfrm_pols_put(pols, *num_pols);
2694 				return PTR_ERR(pols[1]);
2695 			}
2696 			(*num_pols)++;
2697 			(*num_xfrms) += pols[1]->xfrm_nr;
2698 		}
2699 	}
2700 #endif
2701 	for (i = 0; i < *num_pols; i++) {
2702 		if (pols[i]->action != XFRM_POLICY_ALLOW) {
2703 			*num_xfrms = -1;
2704 			break;
2705 		}
2706 	}
2707 
2708 	return 0;
2709 
2710 }
2711 
2712 static struct xfrm_dst *
2713 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
2714 			       const struct flowi *fl, u16 family,
2715 			       struct dst_entry *dst_orig)
2716 {
2717 	struct net *net = xp_net(pols[0]);
2718 	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
2719 	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
2720 	struct xfrm_dst *xdst;
2721 	struct dst_entry *dst;
2722 	int err;
2723 
2724 	/* Try to instantiate a bundle */
2725 	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
2726 	if (err <= 0) {
2727 		if (err == 0)
2728 			return NULL;
2729 
2730 		if (err != -EAGAIN)
2731 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2732 		return ERR_PTR(err);
2733 	}
2734 
2735 	dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
2736 	if (IS_ERR(dst)) {
2737 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
2738 		return ERR_CAST(dst);
2739 	}
2740 
2741 	xdst = (struct xfrm_dst *)dst;
2742 	xdst->num_xfrms = err;
2743 	xdst->num_pols = num_pols;
2744 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2745 	xdst->policy_genid = atomic_read(&pols[0]->genid);
2746 
2747 	return xdst;
2748 }
2749 
2750 static void xfrm_policy_queue_process(struct timer_list *t)
2751 {
2752 	struct sk_buff *skb;
2753 	struct sock *sk;
2754 	struct dst_entry *dst;
2755 	struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
2756 	struct net *net = xp_net(pol);
2757 	struct xfrm_policy_queue *pq = &pol->polq;
2758 	struct flowi fl;
2759 	struct sk_buff_head list;
2760 
2761 	spin_lock(&pq->hold_queue.lock);
2762 	skb = skb_peek(&pq->hold_queue);
2763 	if (!skb) {
2764 		spin_unlock(&pq->hold_queue.lock);
2765 		goto out;
2766 	}
2767 	dst = skb_dst(skb);
2768 	sk = skb->sk;
2769 	xfrm_decode_session(skb, &fl, dst->ops->family);
2770 	spin_unlock(&pq->hold_queue.lock);
2771 
2772 	dst_hold(xfrm_dst_path(dst));
2773 	dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
2774 	if (IS_ERR(dst))
2775 		goto purge_queue;
2776 
2777 	if (dst->flags & DST_XFRM_QUEUE) {
2778 		dst_release(dst);
2779 
2780 		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
2781 			goto purge_queue;
2782 
2783 		pq->timeout = pq->timeout << 1;
2784 		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
2785 			xfrm_pol_hold(pol);
2786 		goto out;
2787 	}
2788 
2789 	dst_release(dst);
2790 
2791 	__skb_queue_head_init(&list);
2792 
2793 	spin_lock(&pq->hold_queue.lock);
2794 	pq->timeout = 0;
2795 	skb_queue_splice_init(&pq->hold_queue, &list);
2796 	spin_unlock(&pq->hold_queue.lock);
2797 
2798 	while (!skb_queue_empty(&list)) {
2799 		skb = __skb_dequeue(&list);
2800 
2801 		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
2802 		dst_hold(xfrm_dst_path(skb_dst(skb)));
2803 		dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
2804 		if (IS_ERR(dst)) {
2805 			kfree_skb(skb);
2806 			continue;
2807 		}
2808 
2809 		nf_reset(skb);
2810 		skb_dst_drop(skb);
2811 		skb_dst_set(skb, dst);
2812 
2813 		dst_output(net, skb->sk, skb);
2814 	}
2815 
2816 out:
2817 	xfrm_pol_put(pol);
2818 	return;
2819 
2820 purge_queue:
2821 	pq->timeout = 0;
2822 	skb_queue_purge(&pq->hold_queue);
2823 	xfrm_pol_put(pol);
2824 }
2825 
2826 static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2827 {
2828 	unsigned long sched_next;
2829 	struct dst_entry *dst = skb_dst(skb);
2830 	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
2831 	struct xfrm_policy *pol = xdst->pols[0];
2832 	struct xfrm_policy_queue *pq = &pol->polq;
2833 
2834 	if (unlikely(skb_fclone_busy(sk, skb))) {
2835 		kfree_skb(skb);
2836 		return 0;
2837 	}
2838 
2839 	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
2840 		kfree_skb(skb);
2841 		return -EAGAIN;
2842 	}
2843 
2844 	skb_dst_force(skb);
2845 
2846 	spin_lock_bh(&pq->hold_queue.lock);
2847 
2848 	if (!pq->timeout)
2849 		pq->timeout = XFRM_QUEUE_TMO_MIN;
2850 
2851 	sched_next = jiffies + pq->timeout;
2852 
2853 	if (del_timer(&pq->hold_timer)) {
2854 		if (time_before(pq->hold_timer.expires, sched_next))
2855 			sched_next = pq->hold_timer.expires;
2856 		xfrm_pol_put(pol);
2857 	}
2858 
2859 	__skb_queue_tail(&pq->hold_queue, skb);
2860 	if (!mod_timer(&pq->hold_timer, sched_next))
2861 		xfrm_pol_hold(pol);
2862 
2863 	spin_unlock_bh(&pq->hold_queue.lock);
2864 
2865 	return 0;
2866 }
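
/*
 * Example (editorial walk-through, not part of the original source): the
 * hold-queue timer backoff implemented by the two functions above.  The
 * first queued packet arms the timer at XFRM_QUEUE_TMO_MIN; each time the
 * bundle is still unresolved when the timer fires, the timeout doubles,
 * and the queue is purged once the timeout has reached XFRM_QUEUE_TMO_MAX.
 * Assuming HZ == 100 (so TMO_MIN == 10 and TMO_MAX == 6000 jiffies):
 *
 *	timeouts used: 10, 20, 40, 80, ..., 5120, 10240
 *
 * On the expiry after rearming at 10240 jiffies, 10240 >= 6000, so the
 * queue is purged.
 */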
2867 
2868 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
2869 						 struct xfrm_flo *xflo,
2870 						 const struct flowi *fl,
2871 						 int num_xfrms,
2872 						 u16 family)
2873 {
2874 	int err;
2875 	struct net_device *dev;
2876 	struct dst_entry *dst;
2877 	struct dst_entry *dst1;
2878 	struct xfrm_dst *xdst;
2879 
2880 	xdst = xfrm_alloc_dst(net, family);
2881 	if (IS_ERR(xdst))
2882 		return xdst;
2883 
2884 	if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
2885 	    net->xfrm.sysctl_larval_drop ||
2886 	    num_xfrms <= 0)
2887 		return xdst;
2888 
2889 	dst = xflo->dst_orig;
2890 	dst1 = &xdst->u.dst;
2891 	dst_hold(dst);
2892 	xdst->route = dst;
2893 
2894 	dst_copy_metrics(dst1, dst);
2895 
2896 	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2897 	dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
2898 	dst1->lastuse = jiffies;
2899 
2900 	dst1->input = dst_discard;
2901 	dst1->output = xdst_queue_output;
2902 
2903 	dst_hold(dst);
2904 	xfrm_dst_set_child(xdst, dst);
2905 	xdst->path = dst;
2906 
2907 	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
2908 
2909 	err = -ENODEV;
2910 	dev = dst->dev;
2911 	if (!dev)
2912 		goto free_dst;
2913 
2914 	err = xfrm_fill_dst(xdst, dev, fl);
2915 	if (err)
2916 		goto free_dst;
2917 
2918 out:
2919 	return xdst;
2920 
2921 free_dst:
2922 	dst_release(dst1);
2923 	xdst = ERR_PTR(err);
2924 	goto out;
2925 }
2926 
2927 static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
2928 					   const struct flowi *fl,
2929 					   u16 family, u8 dir,
2930 					   struct xfrm_flo *xflo, u32 if_id)
2931 {
2932 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
2933 	int num_pols = 0, num_xfrms = 0, err;
2934 	struct xfrm_dst *xdst;
2935 
2936 	/* Resolve the policies to use if we couldn't get them
2937 	 * from the previous cache entry. */
2938 	num_pols = 1;
2939 	pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
2940 	err = xfrm_expand_policies(fl, family, pols,
2941 					   &num_pols, &num_xfrms);
2942 	if (err < 0)
2943 		goto inc_error;
2944 	if (num_pols == 0)
2945 		return NULL;
2946 	if (num_xfrms <= 0)
2947 		goto make_dummy_bundle;
2948 
2949 	xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
2950 					      xflo->dst_orig);
2951 	if (IS_ERR(xdst)) {
2952 		err = PTR_ERR(xdst);
2953 		if (err == -EREMOTE) {
2954 			xfrm_pols_put(pols, num_pols);
2955 			return NULL;
2956 		}
2957 
2958 		if (err != -EAGAIN)
2959 			goto error;
2960 		goto make_dummy_bundle;
2961 	} else if (xdst == NULL) {
2962 		num_xfrms = 0;
2963 		goto make_dummy_bundle;
2964 	}
2965 
2966 	return xdst;
2967 
2968 make_dummy_bundle:
2969 	/* We found policies, but there are no bundles to instantiate:
2970 	 * either because the policy blocks, has no transformations, or
2971 	 * we could not build a template (no xfrm_states). */
2972 	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
2973 	if (IS_ERR(xdst)) {
2974 		xfrm_pols_put(pols, num_pols);
2975 		return ERR_CAST(xdst);
2976 	}
2977 	xdst->num_pols = num_pols;
2978 	xdst->num_xfrms = num_xfrms;
2979 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2980 
2981 	return xdst;
2982 
2983 inc_error:
2984 	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2985 error:
2986 	xfrm_pols_put(pols, num_pols);
2987 	return ERR_PTR(err);
2988 }
2989 
2990 static struct dst_entry *make_blackhole(struct net *net, u16 family,
2991 					struct dst_entry *dst_orig)
2992 {
2993 	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2994 	struct dst_entry *ret;
2995 
2996 	if (!afinfo) {
2997 		dst_release(dst_orig);
2998 		return ERR_PTR(-EINVAL);
2999 	} else {
3000 		ret = afinfo->blackhole_route(net, dst_orig);
3001 	}
3002 	rcu_read_unlock();
3003 
3004 	return ret;
3005 }
3006 
3007 /* Finds/creates a bundle for the given flow and if_id
3008  *
3009  * At the moment we eat a raw IP route. Mostly to speed up lookups
3010  * on interfaces with disabled IPsec.
3011  *
3012  * xfrm_lookup uses an if_id of 0 by default, and is provided for
3013  * compatibility
3014  */
3015 struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
3016 					struct dst_entry *dst_orig,
3017 					const struct flowi *fl,
3018 					const struct sock *sk,
3019 					int flags, u32 if_id)
3020 {
3021 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3022 	struct xfrm_dst *xdst;
3023 	struct dst_entry *dst, *route;
3024 	u16 family = dst_orig->ops->family;
3025 	u8 dir = XFRM_POLICY_OUT;
3026 	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
3027 
3028 	dst = NULL;
3029 	xdst = NULL;
3030 	route = NULL;
3031 
3032 	sk = sk_const_to_full_sk(sk);
3033 	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
3034 		num_pols = 1;
3035 		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
3036 						if_id);
3037 		err = xfrm_expand_policies(fl, family, pols,
3038 					   &num_pols, &num_xfrms);
3039 		if (err < 0)
3040 			goto dropdst;
3041 
3042 		if (num_pols) {
3043 			if (num_xfrms <= 0) {
3044 				drop_pols = num_pols;
3045 				goto no_transform;
3046 			}
3047 
3048 			xdst = xfrm_resolve_and_create_bundle(
3049 					pols, num_pols, fl,
3050 					family, dst_orig);
3051 
3052 			if (IS_ERR(xdst)) {
3053 				xfrm_pols_put(pols, num_pols);
3054 				err = PTR_ERR(xdst);
3055 				if (err == -EREMOTE)
3056 					goto nopol;
3057 
3058 				goto dropdst;
3059 			} else if (xdst == NULL) {
3060 				num_xfrms = 0;
3061 				drop_pols = num_pols;
3062 				goto no_transform;
3063 			}
3064 
3065 			route = xdst->route;
3066 		}
3067 	}
3068 
3069 	if (xdst == NULL) {
3070 		struct xfrm_flo xflo;
3071 
3072 		xflo.dst_orig = dst_orig;
3073 		xflo.flags = flags;
3074 
3075 		/* To accelerate a bit...  */
3076 		if ((dst_orig->flags & DST_NOXFRM) ||
3077 		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
3078 			goto nopol;
3079 
3080 		xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
3081 		if (xdst == NULL)
3082 			goto nopol;
3083 		if (IS_ERR(xdst)) {
3084 			err = PTR_ERR(xdst);
3085 			goto dropdst;
3086 		}
3087 
3088 		num_pols = xdst->num_pols;
3089 		num_xfrms = xdst->num_xfrms;
3090 		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
3091 		route = xdst->route;
3092 	}
3093 
3094 	dst = &xdst->u.dst;
3095 	if (route == NULL && num_xfrms > 0) {
3096 		/* The only case when xfrm_bundle_lookup() returns a
3097 		 * bundle with a null route is when the template could
3098 		 * not be resolved. It means policies are there, but the
3099 		 * bundle could not be created, since we don't yet have
3100 		 * the xfrm_states. We need to wait for KM to negotiate
3101 		 * new SAs or bail out with an error. */
3102 		if (net->xfrm.sysctl_larval_drop) {
3103 			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3104 			err = -EREMOTE;
3105 			goto error;
3106 		}
3107 
3108 		err = -EAGAIN;
3109 
3110 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3111 		goto error;
3112 	}
3113 
3114 no_transform:
3115 	if (num_pols == 0)
3116 		goto nopol;
3117 
3118 	if ((flags & XFRM_LOOKUP_ICMP) &&
3119 	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
3120 		err = -ENOENT;
3121 		goto error;
3122 	}
3123 
3124 	for (i = 0; i < num_pols; i++)
3125 		pols[i]->curlft.use_time = ktime_get_real_seconds();
3126 
3127 	if (num_xfrms < 0) {
3128 		/* Prohibit the flow */
3129 		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
3130 		err = -EPERM;
3131 		goto error;
3132 	} else if (num_xfrms > 0) {
3133 		/* Flow transformed */
3134 		dst_release(dst_orig);
3135 	} else {
3136 		/* Flow passes untransformed */
3137 		dst_release(dst);
3138 		dst = dst_orig;
3139 	}
3140 ok:
3141 	xfrm_pols_put(pols, drop_pols);
3142 	if (dst && dst->xfrm &&
3143 	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
3144 		dst->flags |= DST_XFRM_TUNNEL;
3145 	return dst;
3146 
3147 nopol:
3148 	if (!(flags & XFRM_LOOKUP_ICMP)) {
3149 		dst = dst_orig;
3150 		goto ok;
3151 	}
3152 	err = -ENOENT;
3153 error:
3154 	dst_release(dst);
3155 dropdst:
3156 	if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
3157 		dst_release(dst_orig);
3158 	xfrm_pols_put(pols, drop_pols);
3159 	return ERR_PTR(err);
3160 }
3161 EXPORT_SYMBOL(xfrm_lookup_with_ifid);
3162 
3163 /* Main function: finds/creates a bundle for the given flow.
3164  *
3165  * At the moment we eat a raw IP route. Mostly to speed up lookups
3166  * on interfaces with disabled IPsec.
3167  */
3168 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
3169 			      const struct flowi *fl, const struct sock *sk,
3170 			      int flags)
3171 {
3172 	return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
3173 }
3174 EXPORT_SYMBOL(xfrm_lookup);
3175 
3176 /* Callers of xfrm_lookup_route() must ensure a call to dst_output().
3177  * Otherwise we may send out blackholed packets.
3178  */
3179 struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
3180 				    const struct flowi *fl,
3181 				    const struct sock *sk, int flags)
3182 {
3183 	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
3184 					    flags | XFRM_LOOKUP_QUEUE |
3185 					    XFRM_LOOKUP_KEEP_DST_REF);
3186 
3187 	if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
3188 		return make_blackhole(net, dst_orig->ops->family, dst_orig);
3189 
3190 	if (IS_ERR(dst))
3191 		dst_release(dst_orig);
3192 
3193 	return dst;
3194 }
3195 EXPORT_SYMBOL(xfrm_lookup_route);
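
/*
 * Example (editorial sketch, not part of the original source): the intended
 * calling pattern for xfrm_lookup_route().  When larval drop is enabled, a
 * blackhole route may be returned instead of -EREMOTE, so the caller must
 * still hand the skb to dst_output(), which silently discards it.  "rt" is
 * a hypothetical routing result used only for illustration:
 *
 *	dst = xfrm_lookup_route(net, &rt->dst, &fl, sk, 0);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);   // dst_orig was already released
 *	skb_dst_set(skb, dst);
 *	return dst_output(net, sk, skb);
 */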
3196 
3197 static inline int
3198 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
3199 {
3200 	struct sec_path *sp = skb_sec_path(skb);
3201 	struct xfrm_state *x;
3202 
3203 	if (!sp || idx < 0 || idx >= sp->len)
3204 		return 0;
3205 	x = sp->xvec[idx];
3206 	if (!x->type->reject)
3207 		return 0;
3208 	return x->type->reject(x, skb, fl);
3209 }
3210 
3211 /* When the skb is transformed back to its "native" form, we have to
3212  * check policy restrictions. At the moment we do this in a maximally
3213  * stupid way. Shame on me. :-) Of course, connected sockets must
3214  * have the policy cached on them.
3215  */
3216 
3217 static inline int
3218 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
3219 	      unsigned short family)
3220 {
3221 	if (xfrm_state_kern(x))
3222 		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
3223 	return	x->id.proto == tmpl->id.proto &&
3224 		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
3225 		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
3226 		x->props.mode == tmpl->mode &&
3227 		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
3228 		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
3229 		!(x->props.mode != XFRM_MODE_TRANSPORT &&
3230 		  xfrm_state_addr_cmp(tmpl, x, family));
3231 }
3232 
3233 /*
3234  * 0 or more than 0 is returned when validation succeeds (either bypass
3235  * because of optional transport mode, or the next index of the matched
3236  * secpath state with the template).
3237  * -1 is returned when no matching template is found.
3238  * Otherwise "-2 - errored_index" is returned.
3239  */
3240 static inline int
3241 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
3242 	       unsigned short family)
3243 {
3244 	int idx = start;
3245 
3246 	if (tmpl->optional) {
3247 		if (tmpl->mode == XFRM_MODE_TRANSPORT)
3248 			return start;
3249 	} else
3250 		start = -1;
3251 	for (; idx < sp->len; idx++) {
3252 		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
3253 			return ++idx;
3254 		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
3255 			if (start == -1)
3256 				start = -2-idx;
3257 			break;
3258 		}
3259 	}
3260 	return start;
3261 }
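
/*
 * Example (editorial worked example, not part of the original source): how
 * the "-2 - errored_index" encoding above round-trips.  If validation fails
 * at secpath index idx, xfrm_policy_ok() returns k = -2 - idx, and the
 * caller (__xfrm_policy_check()) recovers the index with:
 *
 *	if (k < -1)
 *		xerr_idx = -(2 + k);
 *
 * E.g. a failure at index 3 yields k = -5, and -(2 + (-5)) = 3.
 */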
3262 
3263 static void
3264 decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse)
3265 {
3266 	const struct iphdr *iph = ip_hdr(skb);
3267 	int ihl = iph->ihl;
3268 	u8 *xprth = skb_network_header(skb) + ihl * 4;
3269 	struct flowi4 *fl4 = &fl->u.ip4;
3270 	int oif = 0;
3271 
3272 	if (skb_dst(skb))
3273 		oif = skb_dst(skb)->dev->ifindex;
3274 
3275 	memset(fl4, 0, sizeof(struct flowi4));
3276 	fl4->flowi4_mark = skb->mark;
3277 	fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
3278 
3279 	fl4->flowi4_proto = iph->protocol;
3280 	fl4->daddr = reverse ? iph->saddr : iph->daddr;
3281 	fl4->saddr = reverse ? iph->daddr : iph->saddr;
3282 	fl4->flowi4_tos = iph->tos;
3283 
3284 	if (!ip_is_fragment(iph)) {
3285 		switch (iph->protocol) {
3286 		case IPPROTO_UDP:
3287 		case IPPROTO_UDPLITE:
3288 		case IPPROTO_TCP:
3289 		case IPPROTO_SCTP:
3290 		case IPPROTO_DCCP:
3291 			if (xprth + 4 < skb->data ||
3292 			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
3293 				__be16 *ports;
3294 
3295 				xprth = skb_network_header(skb) + ihl * 4;
3296 				ports = (__be16 *)xprth;
3297 
3298 				fl4->fl4_sport = ports[!!reverse];
3299 				fl4->fl4_dport = ports[!reverse];
3300 			}
3301 			break;
3302 		case IPPROTO_ICMP:
3303 			if (xprth + 2 < skb->data ||
3304 			    pskb_may_pull(skb, xprth + 2 - skb->data)) {
3305 				u8 *icmp;
3306 
3307 				xprth = skb_network_header(skb) + ihl * 4;
3308 				icmp = xprth;
3309 
3310 				fl4->fl4_icmp_type = icmp[0];
3311 				fl4->fl4_icmp_code = icmp[1];
3312 			}
3313 			break;
3314 		case IPPROTO_ESP:
3315 			if (xprth + 4 < skb->data ||
3316 			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
3317 				__be32 *ehdr;
3318 
3319 				xprth = skb_network_header(skb) + ihl * 4;
3320 				ehdr = (__be32 *)xprth;
3321 
3322 				fl4->fl4_ipsec_spi = ehdr[0];
3323 			}
3324 			break;
3325 		case IPPROTO_AH:
3326 			if (xprth + 8 < skb->data ||
3327 			    pskb_may_pull(skb, xprth + 8 - skb->data)) {
3328 				__be32 *ah_hdr;
3329 
3330 				xprth = skb_network_header(skb) + ihl * 4;
3331 				ah_hdr = (__be32 *)xprth;
3332 
3333 				fl4->fl4_ipsec_spi = ah_hdr[1];
3334 			}
3335 			break;
3336 		case IPPROTO_COMP:
3337 			if (xprth + 4 < skb->data ||
3338 			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
3339 				__be16 *ipcomp_hdr;
3340 
3341 				xprth = skb_network_header(skb) + ihl * 4;
3342 				ipcomp_hdr = (__be16 *)xprth;
3343 
3344 				fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
3345 			}
3346 			break;
3347 		case IPPROTO_GRE:
3348 			if (xprth + 12 < skb->data ||
3349 			    pskb_may_pull(skb, xprth + 12 - skb->data)) {
3350 				__be16 *greflags;
3351 				__be32 *gre_hdr;
3352 
3353 				xprth = skb_network_header(skb) + ihl * 4;
3354 				greflags = (__be16 *)xprth;
3355 				gre_hdr = (__be32 *)xprth;
3356 
3357 				if (greflags[0] & GRE_KEY) {
3358 					if (greflags[0] & GRE_CSUM)
3359 						gre_hdr++;
3360 					fl4->fl4_gre_key = gre_hdr[1];
3361 				}
3362 			}
3363 			break;
3364 		default:
3365 			fl4->fl4_ipsec_spi = 0;
3366 			break;
3367 		}
3368 	}
3369 }
3370 
3371 #if IS_ENABLED(CONFIG_IPV6)
3372 static void
3373 decode_session6(struct sk_buff *skb, struct flowi *fl, bool reverse)
3374 {
3375 	struct flowi6 *fl6 = &fl->u.ip6;
3376 	int onlyproto = 0;
3377 	const struct ipv6hdr *hdr = ipv6_hdr(skb);
3378 	u32 offset = sizeof(*hdr);
3379 	struct ipv6_opt_hdr *exthdr;
3380 	const unsigned char *nh = skb_network_header(skb);
3381 	u16 nhoff = IP6CB(skb)->nhoff;
3382 	int oif = 0;
3383 	u8 nexthdr;
3384 
3385 	if (!nhoff)
3386 		nhoff = offsetof(struct ipv6hdr, nexthdr);
3387 
3388 	nexthdr = nh[nhoff];
3389 
3390 	if (skb_dst(skb))
3391 		oif = skb_dst(skb)->dev->ifindex;
3392 
3393 	memset(fl6, 0, sizeof(struct flowi6));
3394 	fl6->flowi6_mark = skb->mark;
3395 	fl6->flowi6_oif = reverse ? skb->skb_iif : oif;
3396 
3397 	fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
3398 	fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
3399 
3400 	while (nh + offset + sizeof(*exthdr) < skb->data ||
3401 	       pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) {
3402 		nh = skb_network_header(skb);
3403 		exthdr = (struct ipv6_opt_hdr *)(nh + offset);
3404 
3405 		switch (nexthdr) {
3406 		case NEXTHDR_FRAGMENT:
3407 			onlyproto = 1;
3408 			/* fall through */
3409 		case NEXTHDR_ROUTING:
3410 		case NEXTHDR_HOP:
3411 		case NEXTHDR_DEST:
3412 			offset += ipv6_optlen(exthdr);
3413 			nexthdr = exthdr->nexthdr;
3414 			exthdr = (struct ipv6_opt_hdr *)(nh + offset);
3415 			break;
3416 		case IPPROTO_UDP:
3417 		case IPPROTO_UDPLITE:
3418 		case IPPROTO_TCP:
3419 		case IPPROTO_SCTP:
3420 		case IPPROTO_DCCP:
3421 			if (!onlyproto && (nh + offset + 4 < skb->data ||
3422 			     pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
3423 				__be16 *ports;
3424 
3425 				nh = skb_network_header(skb);
3426 				ports = (__be16 *)(nh + offset);
3427 				fl6->fl6_sport = ports[!!reverse];
3428 				fl6->fl6_dport = ports[!reverse];
3429 			}
3430 			fl6->flowi6_proto = nexthdr;
3431 			return;
3432 		case IPPROTO_ICMPV6:
3433 			if (!onlyproto && (nh + offset + 2 < skb->data ||
3434 			    pskb_may_pull(skb, nh + offset + 2 - skb->data))) {
3435 				u8 *icmp;
3436 
3437 				nh = skb_network_header(skb);
3438 				icmp = (u8 *)(nh + offset);
3439 				fl6->fl6_icmp_type = icmp[0];
3440 				fl6->fl6_icmp_code = icmp[1];
3441 			}
3442 			fl6->flowi6_proto = nexthdr;
3443 			return;
3444 #if IS_ENABLED(CONFIG_IPV6_MIP6)
3445 		case IPPROTO_MH:
3446 			offset += ipv6_optlen(exthdr);
3447 			if (!onlyproto && (nh + offset + 3 < skb->data ||
3448 			    pskb_may_pull(skb, nh + offset + 3 - skb->data))) {
3449 				struct ip6_mh *mh;
3450 
3451 				nh = skb_network_header(skb);
3452 				mh = (struct ip6_mh *)(nh + offset);
3453 				fl6->fl6_mh_type = mh->ip6mh_type;
3454 			}
3455 			fl6->flowi6_proto = nexthdr;
3456 			return;
3457 #endif
3458 		/* XXX Why are these headers here? */
3459 		case IPPROTO_AH:
3460 		case IPPROTO_ESP:
3461 		case IPPROTO_COMP:
3462 		default:
3463 			fl6->fl6_ipsec_spi = 0;
3464 			fl6->flowi6_proto = nexthdr;
3465 			return;
3466 		}
3467 	}
3468 }
3469 #endif
3470 
3471 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
3472 			  unsigned int family, int reverse)
3473 {
3474 	switch (family) {
3475 	case AF_INET:
3476 		decode_session4(skb, fl, reverse);
3477 		break;
3478 #if IS_ENABLED(CONFIG_IPV6)
3479 	case AF_INET6:
3480 		decode_session6(skb, fl, reverse);
3481 		break;
3482 #endif
3483 	default:
3484 		return -EAFNOSUPPORT;
3485 	}
3486 
3487 	return security_xfrm_decode_session(skb, &fl->flowi_secid);
3488 }
3489 EXPORT_SYMBOL(__xfrm_decode_session);
3490 
3491 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
3492 {
3493 	for (; k < sp->len; k++) {
3494 		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
3495 			*idxp = k;
3496 			return 1;
3497 		}
3498 	}
3499 
3500 	return 0;
3501 }
3502 
3503 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
3504 			unsigned short family)
3505 {
3506 	struct net *net = dev_net(skb->dev);
3507 	struct xfrm_policy *pol;
3508 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3509 	int npols = 0;
3510 	int xfrm_nr;
3511 	int pi;
3512 	int reverse;
3513 	struct flowi fl;
3514 	int xerr_idx = -1;
3515 	const struct xfrm_if_cb *ifcb;
3516 	struct sec_path *sp;
3517 	struct xfrm_if *xi;
3518 	u32 if_id = 0;
3519 
3520 	rcu_read_lock();
3521 	ifcb = xfrm_if_get_cb();
3522 
3523 	if (ifcb) {
3524 		xi = ifcb->decode_session(skb, family);
3525 		if (xi) {
3526 			if_id = xi->p.if_id;
3527 			net = xi->net;
3528 		}
3529 	}
3530 	rcu_read_unlock();
3531 
3532 	reverse = dir & ~XFRM_POLICY_MASK;
3533 	dir &= XFRM_POLICY_MASK;
3534 
3535 	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
3536 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
3537 		return 0;
3538 	}
3539 
3540 	nf_nat_decode_session(skb, &fl, family);
3541 
3542 	/* First, check used SA against their selectors. */
3543 	sp = skb_sec_path(skb);
3544 	if (sp) {
3545 		int i;
3546 
3547 		for (i = sp->len - 1; i >= 0; i--) {
3548 			struct xfrm_state *x = sp->xvec[i];
3549 			if (!xfrm_selector_match(&x->sel, &fl, family)) {
3550 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
3551 				return 0;
3552 			}
3553 		}
3554 	}
3555 
3556 	pol = NULL;
3557 	sk = sk_to_full_sk(sk);
3558 	if (sk && sk->sk_policy[dir]) {
3559 		pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
3560 		if (IS_ERR(pol)) {
3561 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3562 			return 0;
3563 		}
3564 	}
3565 
3566 	if (!pol)
3567 		pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);
3568 
3569 	if (IS_ERR(pol)) {
3570 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3571 		return 0;
3572 	}
3573 
3574 	if (!pol) {
3575 		if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) {
3576 			xfrm_secpath_reject(xerr_idx, skb, &fl);
3577 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3578 			return 0;
3579 		}
3580 		return 1;
3581 	}
3582 
3583 	pol->curlft.use_time = ktime_get_real_seconds();
3584 
3585 	pols[0] = pol;
3586 	npols++;
3587 #ifdef CONFIG_XFRM_SUB_POLICY
3588 	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
3589 		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
3590 						    &fl, family,
3591 						    XFRM_POLICY_IN, if_id);
3592 		if (pols[1]) {
3593 			if (IS_ERR(pols[1])) {
3594 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3595 				return 0;
3596 			}
3597 			pols[1]->curlft.use_time = ktime_get_real_seconds();
3598 			npols++;
3599 		}
3600 	}
3601 #endif
3602 
3603 	if (pol->action == XFRM_POLICY_ALLOW) {
3604 		static struct sec_path dummy;
3605 		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
3606 		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
3607 		struct xfrm_tmpl **tpp = tp;
3608 		int ti = 0;
3609 		int i, k;
3610 
3611 		sp = skb_sec_path(skb);
3612 		if (!sp)
3613 			sp = &dummy;
3614 
3615 		for (pi = 0; pi < npols; pi++) {
3616 			if (pols[pi] != pol &&
3617 			    pols[pi]->action != XFRM_POLICY_ALLOW) {
3618 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3619 				goto reject;
3620 			}
3621 			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
3622 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
3623 				goto reject_error;
3624 			}
3625 			for (i = 0; i < pols[pi]->xfrm_nr; i++)
3626 				tpp[ti++] = &pols[pi]->xfrm_vec[i];
3627 		}
3628 		xfrm_nr = ti;
3629 		if (npols > 1) {
3630 			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
3631 			tpp = stp;
3632 		}
3633 
3634 		/* For each tunnel xfrm, find the first matching tmpl.
3635 		 * For each tmpl before that, find the corresponding xfrm.
3636 		 * Order is _important_. Later we will implement
3637 		 * some barriers, but at the moment barriers
3638 		 * are implied between every two transformations.
3639 		 */
3640 		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
3641 			k = xfrm_policy_ok(tpp[i], sp, k, family);
3642 			if (k < 0) {
3643 				if (k < -1)
3644 					/* "-2 - errored_index" returned */
3645 					xerr_idx = -(2+k);
3646 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3647 				goto reject;
3648 			}
3649 		}
3650 
3651 		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
3652 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3653 			goto reject;
3654 		}
3655 
3656 		xfrm_pols_put(pols, npols);
3657 		return 1;
3658 	}
3659 	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3660 
3661 reject:
3662 	xfrm_secpath_reject(xerr_idx, skb, &fl);
3663 reject_error:
3664 	xfrm_pols_put(pols, npols);
3665 	return 0;
3666 }
3667 EXPORT_SYMBOL(__xfrm_policy_check);
3668 
3669 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
3670 {
3671 	struct net *net = dev_net(skb->dev);
3672 	struct flowi fl;
3673 	struct dst_entry *dst;
3674 	int res = 1;
3675 
3676 	if (xfrm_decode_session(skb, &fl, family) < 0) {
3677 		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3678 		return 0;
3679 	}
3680 
3681 	skb_dst_force(skb);
3682 	if (!skb_dst(skb)) {
3683 		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3684 		return 0;
3685 	}
3686 
3687 	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
3688 	if (IS_ERR(dst)) {
3689 		res = 0;
3690 		dst = NULL;
3691 	}
3692 	skb_dst_set(skb, dst);
3693 	return res;
3694 }
3695 EXPORT_SYMBOL(__xfrm_route_forward);
3696 
3697 /* Optimize later using cookies and generation ids. */
3698 
3699 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
3700 {
3701 	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
3702 	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
3703 	 * get validated by dst_ops->check on every use.  We do this
3704 	 * because when a normal route referenced by an XFRM dst is
3705 	 * obsoleted we do not go looking around for all parent
3706 	 * referencing XFRM dsts so that we can invalidate them.  It
3707 	 * is just too much work.  Instead we make the checks here on
3708 	 * every use.  For example:
3709 	 *
3710 	 *	XFRM dst A --> IPv4 dst X
3711 	 *
3712 	 * X is the "xdst->route" of A (X is also the "dst->path" of A
3713 	 * in this example).  If X is marked obsolete, "A" will not
3714 	 * notice.  That's what we are validating here via the
3715 	 * stale_bundle() check.
3716 	 *
3717 	 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
3718 	 * be marked on it.
3719 	 * This will force stale_bundle() to fail on any xdst bundle with
3720 	 * this dst linked in it.
3721 	 */
3722 	if (dst->obsolete < 0 && !stale_bundle(dst))
3723 		return dst;
3724 
3725 	return NULL;
3726 }
3727 
3728 static int stale_bundle(struct dst_entry *dst)
3729 {
3730 	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
3731 }
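
/*
 * Example (editorial sketch, not part of the original source): what the
 * DST_OBSOLETE_FORCE_CHK scheme described above means for callers.  Cached
 * XFRM dsts must be revalidated through dst_check() before every use; a
 * NULL result means the bundle went stale and must be looked up again:
 *
 *	dst = dst_check(dst, cookie);      // ends up in xfrm_dst_check()
 *	if (!dst) {
 *		// drop the cached entry and redo xfrm_lookup()
 *	}
 *
 * "cookie" here stands for whatever the caller stashed earlier (e.g. the
 * path_cookie recorded for IPv6 routes in xfrm_init_path()).
 */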
3732 
3733 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
3734 {
3735 	while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
3736 		dst->dev = dev_net(dev)->loopback_dev;
3737 		dev_hold(dst->dev);
3738 		dev_put(dev);
3739 	}
3740 }
3741 EXPORT_SYMBOL(xfrm_dst_ifdown);
3742 
3743 static void xfrm_link_failure(struct sk_buff *skb)
3744 {
3745 	/* Impossible. Such dst must be popped before it reaches the point of failure. */
3746 }
3747 
3748 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
3749 {
3750 	if (dst) {
3751 		if (dst->obsolete) {
3752 			dst_release(dst);
3753 			dst = NULL;
3754 		}
3755 	}
3756 	return dst;
3757 }
3758 
3759 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
3760 {
3761 	while (nr--) {
3762 		struct xfrm_dst *xdst = bundle[nr];
3763 		u32 pmtu, route_mtu_cached;
3764 		struct dst_entry *dst;
3765 
3766 		dst = &xdst->u.dst;
3767 		pmtu = dst_mtu(xfrm_dst_child(dst));
3768 		xdst->child_mtu_cached = pmtu;
3769 
3770 		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
3771 
3772 		route_mtu_cached = dst_mtu(xdst->route);
3773 		xdst->route_mtu_cached = route_mtu_cached;
3774 
3775 		if (pmtu > route_mtu_cached)
3776 			pmtu = route_mtu_cached;
3777 
3778 		dst_metric_set(dst, RTAX_MTU, pmtu);
3779 	}
3780 }
3781 
3782 /* Check that the bundle accepts the flow and its components are
3783  * still valid.
3784  */
3785 
3786 static int xfrm_bundle_ok(struct xfrm_dst *first)
3787 {
3788 	struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
3789 	struct dst_entry *dst = &first->u.dst;
3790 	struct xfrm_dst *xdst;
3791 	int start_from, nr;
3792 	u32 mtu;
3793 
3794 	if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
3795 	    (dst->dev && !netif_running(dst->dev)))
3796 		return 0;
3797 
3798 	if (dst->flags & DST_XFRM_QUEUE)
3799 		return 1;
3800 
3801 	start_from = nr = 0;
3802 	do {
3803 		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
3804 
3805 		if (dst->xfrm->km.state != XFRM_STATE_VALID)
3806 			return 0;
3807 		if (xdst->xfrm_genid != dst->xfrm->genid)
3808 			return 0;
3809 		if (xdst->num_pols > 0 &&
3810 		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
3811 			return 0;
3812 
3813 		bundle[nr++] = xdst;
3814 
3815 		mtu = dst_mtu(xfrm_dst_child(dst));
3816 		if (xdst->child_mtu_cached != mtu) {
3817 			start_from = nr;
3818 			xdst->child_mtu_cached = mtu;
3819 		}
3820 
3821 		if (!dst_check(xdst->route, xdst->route_cookie))
3822 			return 0;
3823 		mtu = dst_mtu(xdst->route);
3824 		if (xdst->route_mtu_cached != mtu) {
3825 			start_from = nr;
3826 			xdst->route_mtu_cached = mtu;
3827 		}
3828 
3829 		dst = xfrm_dst_child(dst);
3830 	} while (dst->xfrm);
3831 
3832 	if (likely(!start_from))
3833 		return 1;
3834 
3835 	xdst = bundle[start_from - 1];
3836 	mtu = xdst->child_mtu_cached;
3837 	while (start_from--) {
3838 		dst = &xdst->u.dst;
3839 
3840 		mtu = xfrm_state_mtu(dst->xfrm, mtu);
3841 		if (mtu > xdst->route_mtu_cached)
3842 			mtu = xdst->route_mtu_cached;
3843 		dst_metric_set(dst, RTAX_MTU, mtu);
3844 		if (!start_from)
3845 			break;
3846 
3847 		xdst = bundle[start_from - 1];
3848 		xdst->child_mtu_cached = mtu;
3849 	}
3850 
3851 	return 1;
3852 }
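
/*
 * Example (editorial walk-through with hypothetical numbers, not part of
 * the original source): the PMTU propagation done at the end of
 * xfrm_bundle_ok() above.  Starting from the innermost changed entry, each
 * level computes
 *
 *	mtu = min(xfrm_state_mtu(dst->xfrm, child_mtu), route_mtu_cached)
 *
 * and stores it via dst_metric_set(dst, RTAX_MTU, mtu).  So if the route
 * MTU is 1500 and xfrm_state_mtu() accounts for (say) 62 bytes of ESP
 * overhead, the bundle dst advertises an MTU of 1438 to its users.
 */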
3853 
3854 static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
3855 {
3856 	return dst_metric_advmss(xfrm_dst_path(dst));
3857 }
3858 
3859 static unsigned int xfrm_mtu(const struct dst_entry *dst)
3860 {
3861 	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
3862 
3863 	return mtu ? : dst_mtu(xfrm_dst_path(dst));
3864 }
3865 
3866 static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
3867 					const void *daddr)
3868 {
3869 	while (dst->xfrm) {
3870 		const struct xfrm_state *xfrm = dst->xfrm;
3871 
3872 		dst = xfrm_dst_child(dst);
3873 
3874 		if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
3875 			continue;
3876 		if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
3877 			daddr = xfrm->coaddr;
3878 		else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
3879 			daddr = &xfrm->id.daddr;
3880 	}
3881 	return daddr;
3882 }
3883 
3884 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
3885 					   struct sk_buff *skb,
3886 					   const void *daddr)
3887 {
3888 	const struct dst_entry *path = xfrm_dst_path(dst);
3889 
3890 	if (!skb)
3891 		daddr = xfrm_get_dst_nexthop(dst, daddr);
3892 	return path->ops->neigh_lookup(path, skb, daddr);
3893 }
3894 
3895 static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
3896 {
3897 	const struct dst_entry *path = xfrm_dst_path(dst);
3898 
3899 	daddr = xfrm_get_dst_nexthop(dst, daddr);
3900 	path->ops->confirm_neigh(path, daddr);
3901 }
3902 
3903 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
3904 {
3905 	int err = 0;
3906 
3907 	if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
3908 		return -EAFNOSUPPORT;
3909 
3910 	spin_lock(&xfrm_policy_afinfo_lock);
3911 	if (unlikely(xfrm_policy_afinfo[family] != NULL))
3912 		err = -EEXIST;
3913 	else {
3914 		struct dst_ops *dst_ops = afinfo->dst_ops;
3915 		if (likely(dst_ops->kmem_cachep == NULL))
3916 			dst_ops->kmem_cachep = xfrm_dst_cache;
3917 		if (likely(dst_ops->check == NULL))
3918 			dst_ops->check = xfrm_dst_check;
3919 		if (likely(dst_ops->default_advmss == NULL))
3920 			dst_ops->default_advmss = xfrm_default_advmss;
3921 		if (likely(dst_ops->mtu == NULL))
3922 			dst_ops->mtu = xfrm_mtu;
3923 		if (likely(dst_ops->negative_advice == NULL))
3924 			dst_ops->negative_advice = xfrm_negative_advice;
3925 		if (likely(dst_ops->link_failure == NULL))
3926 			dst_ops->link_failure = xfrm_link_failure;
3927 		if (likely(dst_ops->neigh_lookup == NULL))
3928 			dst_ops->neigh_lookup = xfrm_neigh_lookup;
3929 		if (likely(!dst_ops->confirm_neigh))
3930 			dst_ops->confirm_neigh = xfrm_confirm_neigh;
3931 		rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
3932 	}
3933 	spin_unlock(&xfrm_policy_afinfo_lock);
3934 
3935 	return err;
3936 }
3937 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
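/*
 * A condensed sketch of how an address family hooks in, loosely
 * modeled on net/ipv4/xfrm4_policy.c (the field list is abridged and
 * illustrative, not a copy of that file):
 *
 *	static const struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
 *		.dst_ops	= &xfrm4_dst_ops_template,
 *		.dst_lookup	= xfrm4_dst_lookup,
 *		.fill_dst	= xfrm4_fill_dst,
 *	};
 *
 *	xfrm_policy_register_afinfo(&xfrm4_policy_afinfo, AF_INET);
 *
 * Because registration only fills dst_ops callbacks that are still
 * NULL, a family may pre-set any of them to override the generic xfrm
 * versions installed above.
 */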
3938 
3939 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
3940 {
3941 	struct dst_ops *dst_ops = afinfo->dst_ops;
3942 	int i;
3943 
3944 	for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
3945 		if (xfrm_policy_afinfo[i] != afinfo)
3946 			continue;
3947 		RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
3948 		break;
3949 	}
3950 
3951 	synchronize_rcu();
3952 
3953 	dst_ops->kmem_cachep = NULL;
3954 	dst_ops->check = NULL;
3955 	dst_ops->negative_advice = NULL;
3956 	dst_ops->link_failure = NULL;
3957 }
3958 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
3959 
3960 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
3961 {
3962 	spin_lock(&xfrm_if_cb_lock);
3963 	rcu_assign_pointer(xfrm_if_cb, ifcb);
3964 	spin_unlock(&xfrm_if_cb_lock);
3965 }
3966 EXPORT_SYMBOL(xfrm_if_register_cb);
3967 
3968 void xfrm_if_unregister_cb(void)
3969 {
3970 	RCU_INIT_POINTER(xfrm_if_cb, NULL);
3971 	synchronize_rcu();
3972 }
3973 EXPORT_SYMBOL(xfrm_if_unregister_cb);
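/*
 * xfrm interfaces (xfrm_interface.c) use this hook to claim packets
 * for a specific if_id; a minimal sketch, assuming the single-callback
 * shape of struct xfrm_if_cb at this revision:
 *
 *	static const struct xfrm_if_cb xfrm_if_cb = {
 *		.decode_session	= xfrmi_decode_session,
 *	};
 *
 *	xfrm_if_register_cb(&xfrm_if_cb);
 *
 * The unregister path clears the pointer first and then waits in
 * synchronize_rcu(), so no RCU reader can still be running the
 * callback once it returns.
 */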
3974 
3975 #ifdef CONFIG_XFRM_STATISTICS
3976 static int __net_init xfrm_statistics_init(struct net *net)
3977 {
3978 	int rv;
3979 	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
3980 	if (!net->mib.xfrm_statistics)
3981 		return -ENOMEM;
3982 	rv = xfrm_proc_init(net);
3983 	if (rv < 0)
3984 		free_percpu(net->mib.xfrm_statistics);
3985 	return rv;
3986 }
3987 
3988 static void xfrm_statistics_fini(struct net *net)
3989 {
3990 	xfrm_proc_fini(net);
3991 	free_percpu(net->mib.xfrm_statistics);
3992 }
3993 #else
3994 static int __net_init xfrm_statistics_init(struct net *net)
3995 {
3996 	return 0;
3997 }
3998 
3999 static void xfrm_statistics_fini(struct net *net)
4000 {
4001 }
4002 #endif
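/*
 * The per-cpu MIB allocated above backs the XFRM_INC_STATS() counters
 * (e.g. LINUX_MIB_XFRMOUTERROR) scattered through the xfrm code, and
 * xfrm_proc_init() exposes the summed values in /proc/net/xfrm_stat.
 */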
4003 
4004 static int __net_init xfrm_policy_init(struct net *net)
4005 {
4006 	unsigned int hmask, sz;
4007 	int dir, err;
4008 
4009 	if (net_eq(net, &init_net)) {
4010 		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
4011 					   sizeof(struct xfrm_dst),
4012 					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4013 					   NULL);
4014 		err = rhashtable_init(&xfrm_policy_inexact_table,
4015 				      &xfrm_pol_inexact_params);
4016 		BUG_ON(err);
4017 	}
4018 
4019 	hmask = 8 - 1;
4020 	sz = (hmask+1) * sizeof(struct hlist_head);
4021 
4022 	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
4023 	if (!net->xfrm.policy_byidx)
4024 		goto out_byidx;
4025 	net->xfrm.policy_idx_hmask = hmask;
4026 
4027 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4028 		struct xfrm_policy_hash *htab;
4029 
4030 		net->xfrm.policy_count[dir] = 0;
4031 		net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
4032 		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
4033 
4034 		htab = &net->xfrm.policy_bydst[dir];
4035 		htab->table = xfrm_hash_alloc(sz);
4036 		if (!htab->table)
4037 			goto out_bydst;
4038 		htab->hmask = hmask;
4039 		htab->dbits4 = 32;
4040 		htab->sbits4 = 32;
4041 		htab->dbits6 = 128;
4042 		htab->sbits6 = 128;
4043 	}
4044 	net->xfrm.policy_hthresh.lbits4 = 32;
4045 	net->xfrm.policy_hthresh.rbits4 = 32;
4046 	net->xfrm.policy_hthresh.lbits6 = 128;
4047 	net->xfrm.policy_hthresh.rbits6 = 128;
4048 
4049 	seqlock_init(&net->xfrm.policy_hthresh.lock);
4050 
4051 	INIT_LIST_HEAD(&net->xfrm.policy_all);
4052 	INIT_LIST_HEAD(&net->xfrm.inexact_bins);
4053 	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
4054 	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
4055 	return 0;
4056 
4057 out_bydst:
4058 	for (dir--; dir >= 0; dir--) {
4059 		struct xfrm_policy_hash *htab;
4060 
4061 		htab = &net->xfrm.policy_bydst[dir];
4062 		xfrm_hash_free(htab->table, sz);
4063 	}
4064 	xfrm_hash_free(net->xfrm.policy_byidx, sz);
4065 out_byidx:
4066 	return -ENOMEM;
4067 }
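/*
 * Initial sizing arithmetic: hmask = 8 - 1 describes an 8-bucket
 * table, so sz = 8 * sizeof(struct hlist_head), i.e. 64 bytes per
 * table on a 64-bit build; the policy_hash_work worker grows the
 * tables on demand.  The dbits/sbits values of 32 and 128 mean the
 * bydst hashes initially key on full host addresses, and
 * xfrm_hash_rebuild can later shorten them to the thresholds
 * configured in policy_hthresh.
 */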
4068 
4069 static void xfrm_policy_fini(struct net *net)
4070 {
4071 	struct xfrm_pol_inexact_bin *b, *t;
4072 	unsigned int sz;
4073 	int dir;
4074 
4075 	flush_work(&net->xfrm.policy_hash_work);
4076 #ifdef CONFIG_XFRM_SUB_POLICY
4077 	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
4078 #endif
4079 	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
4080 
4081 	WARN_ON(!list_empty(&net->xfrm.policy_all));
4082 
4083 	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4084 		struct xfrm_policy_hash *htab;
4085 
4086 		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
4087 
4088 		htab = &net->xfrm.policy_bydst[dir];
4089 		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
4090 		WARN_ON(!hlist_empty(htab->table));
4091 		xfrm_hash_free(htab->table, sz);
4092 	}
4093 
4094 	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
4095 	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
4096 	xfrm_hash_free(net->xfrm.policy_byidx, sz);
4097 
4098 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4099 	list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
4100 		__xfrm_policy_inexact_prune_bin(b, true);
4101 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4102 }
4103 
4104 static int __net_init xfrm_net_init(struct net *net)
4105 {
4106 	int rv;
4107 
4108 	/* Initialize the per-net locks here */
4109 	spin_lock_init(&net->xfrm.xfrm_state_lock);
4110 	spin_lock_init(&net->xfrm.xfrm_policy_lock);
4111 	mutex_init(&net->xfrm.xfrm_cfg_mutex);
4112 
4113 	rv = xfrm_statistics_init(net);
4114 	if (rv < 0)
4115 		goto out_statistics;
4116 	rv = xfrm_state_init(net);
4117 	if (rv < 0)
4118 		goto out_state;
4119 	rv = xfrm_policy_init(net);
4120 	if (rv < 0)
4121 		goto out_policy;
4122 	rv = xfrm_sysctl_init(net);
4123 	if (rv < 0)
4124 		goto out_sysctl;
4125 
4126 	return 0;
4127 
4128 out_sysctl:
4129 	xfrm_policy_fini(net);
4130 out_policy:
4131 	xfrm_state_fini(net);
4132 out_state:
4133 	xfrm_statistics_fini(net);
4134 out_statistics:
4135 	return rv;
4136 }
4137 
4138 static void __net_exit xfrm_net_exit(struct net *net)
4139 {
4140 	xfrm_sysctl_fini(net);
4141 	xfrm_policy_fini(net);
4142 	xfrm_state_fini(net);
4143 	xfrm_statistics_fini(net);
4144 }
4145 
4146 static struct pernet_operations __net_initdata xfrm_net_ops = {
4147 	.init = xfrm_net_init,
4148 	.exit = xfrm_net_exit,
4149 };
4150 
4151 void __init xfrm_init(void)
4152 {
4153 	register_pernet_subsys(&xfrm_net_ops);
4154 	xfrm_dev_init();
4155 	seqcount_init(&xfrm_policy_hash_generation);
4156 	xfrm_input_init();
4157 
4158 	RCU_INIT_POINTER(xfrm_if_cb, NULL);
4159 	synchronize_rcu();
4160 }
4161 
4162 #ifdef CONFIG_AUDITSYSCALL
4163 static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
4164 					 struct audit_buffer *audit_buf)
4165 {
4166 	struct xfrm_sec_ctx *ctx = xp->security;
4167 	struct xfrm_selector *sel = &xp->selector;
4168 
4169 	if (ctx)
4170 		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
4171 				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
4172 
4173 	switch (sel->family) {
4174 	case AF_INET:
4175 		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
4176 		if (sel->prefixlen_s != 32)
4177 			audit_log_format(audit_buf, " src_prefixlen=%d",
4178 					 sel->prefixlen_s);
4179 		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
4180 		if (sel->prefixlen_d != 32)
4181 			audit_log_format(audit_buf, " dst_prefixlen=%d",
4182 					 sel->prefixlen_d);
4183 		break;
4184 	case AF_INET6:
4185 		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
4186 		if (sel->prefixlen_s != 128)
4187 			audit_log_format(audit_buf, " src_prefixlen=%d",
4188 					 sel->prefixlen_s);
4189 		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
4190 		if (sel->prefixlen_d != 128)
4191 			audit_log_format(audit_buf, " dst_prefixlen=%d",
4192 					 sel->prefixlen_d);
4193 		break;
4194 	}
4195 }
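/*
 * For example, a selector of 10.1.0.0/16 -> 10.2.0.0/16 appends
 * " src=10.1.0.0 src_prefixlen=16 dst=10.2.0.0 dst_prefixlen=16" to
 * the record; host prefixes (32 for IPv4, 128 for IPv6) are elided to
 * keep audit lines short.
 */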
4196 
4197 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
4198 {
4199 	struct audit_buffer *audit_buf;
4200 
4201 	audit_buf = xfrm_audit_start("SPD-add");
4202 	if (audit_buf == NULL)
4203 		return;
4204 	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4205 	audit_log_format(audit_buf, " res=%u", result);
4206 	xfrm_audit_common_policyinfo(xp, audit_buf);
4207 	audit_log_end(audit_buf);
4208 }
4209 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
4210 
4211 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
4212 			      bool task_valid)
4213 {
4214 	struct audit_buffer *audit_buf;
4215 
4216 	audit_buf = xfrm_audit_start("SPD-delete");
4217 	if (audit_buf == NULL)
4218 		return;
4219 	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4220 	audit_log_format(audit_buf, " res=%u", result);
4221 	xfrm_audit_common_policyinfo(xp, audit_buf);
4222 	audit_log_end(audit_buf);
4223 }
4224 EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
4225 #endif
4226 
4227 #ifdef CONFIG_XFRM_MIGRATE
4228 static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
4229 					const struct xfrm_selector *sel_tgt)
4230 {
4231 	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
4232 		if (sel_tgt->family == sel_cmp->family &&
4233 		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
4234 				    sel_cmp->family) &&
4235 		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
4236 				    sel_cmp->family) &&
4237 		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
4238 		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
4239 			return true;
4240 		}
4241 	} else {
4242 		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
4243 			return true;
4244 		}
4245 	}
4246 	return false;
4247 }
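/*
 * Note the asymmetry: with IPSEC_ULPROTO_ANY only family, addresses
 * and prefix lengths are compared, whereas the memcmp() branch demands
 * byte-exact equality of the whole selector, ports, ifindex and all.
 */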
4248 
4249 static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
4250 						    u8 dir, u8 type, struct net *net)
4251 {
4252 	struct xfrm_policy *pol, *ret = NULL;
4253 	struct hlist_head *chain;
4254 	u32 priority = ~0U;
4255 
4256 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4257 	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
4258 	hlist_for_each_entry(pol, chain, bydst) {
4259 		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
4260 		    pol->type == type) {
4261 			ret = pol;
4262 			priority = ret->priority;
4263 			break;
4264 		}
4265 	}
4266 	chain = &net->xfrm.policy_inexact[dir];
4267 	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
4268 		if ((pol->priority >= priority) && ret)
4269 			break;
4270 
4271 		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
4272 		    pol->type == type) {
4273 			ret = pol;
4274 			break;
4275 		}
4276 	}
4277 
4278 	xfrm_pol_hold(ret);
4279 
4280 	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4281 
4282 	return ret;
4283 }
4284 
4285 static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
4286 {
4287 	int match = 0;
4288 
4289 	if (t->mode == m->mode && t->id.proto == m->proto &&
4290 	    (m->reqid == 0 || t->reqid == m->reqid)) {
4291 		switch (t->mode) {
4292 		case XFRM_MODE_TUNNEL:
4293 		case XFRM_MODE_BEET:
4294 			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
4295 					    m->old_family) &&
4296 			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
4297 					    m->old_family)) {
4298 				match = 1;
4299 			}
4300 			break;
4301 		case XFRM_MODE_TRANSPORT:
4302 			/* In transport mode the template stores no
4303 			   IP addresses, so comparing mode and
4304 			   protocol is sufficient. */
4305 			match = 1;
4306 			break;
4307 		default:
4308 			break;
4309 		}
4310 	}
4311 	return match;
4312 }
4313 
4314 /* update endpoint address(es) of template(s) */
4315 static int xfrm_policy_migrate(struct xfrm_policy *pol,
4316 			       struct xfrm_migrate *m, int num_migrate)
4317 {
4318 	struct xfrm_migrate *mp;
4319 	int i, j, n = 0;
4320 
4321 	write_lock_bh(&pol->lock);
4322 	if (unlikely(pol->walk.dead)) {
4323 		/* target policy has been deleted */
4324 		write_unlock_bh(&pol->lock);
4325 		return -ENOENT;
4326 	}
4327 
4328 	for (i = 0; i < pol->xfrm_nr; i++) {
4329 		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
4330 			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
4331 				continue;
4332 			n++;
4333 			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
4334 			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
4335 				continue;
4336 			/* update endpoints */
4337 			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
4338 			       sizeof(pol->xfrm_vec[i].id.daddr));
4339 			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
4340 			       sizeof(pol->xfrm_vec[i].saddr));
4341 			pol->xfrm_vec[i].encap_family = mp->new_family;
4342 			/* flush bundles */
4343 			atomic_inc(&pol->genid);
4344 		}
4345 	}
4346 
4347 	write_unlock_bh(&pol->lock);
4348 
4349 	if (!n)
4350 		return -ENODATA;
4351 
4352 	return 0;
4353 }
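/*
 * Bumping pol->genid above is what actually forces traffic onto the
 * new endpoints: cached bundles carry the genid they were built under,
 * fail the staleness check once it moves on, and are rebuilt from the
 * rewritten templates.
 */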
4354 
4355 static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
4356 {
4357 	int i, j;
4358 
4359 	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
4360 		return -EINVAL;
4361 
4362 	for (i = 0; i < num_migrate; i++) {
4363 		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
4364 		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
4365 			return -EINVAL;
4366 
4367 		/* check for duplicate entries */
4368 		for (j = i + 1; j < num_migrate; j++) {
4369 			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
4370 				    sizeof(m[i].old_daddr)) &&
4371 			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
4372 				    sizeof(m[i].old_saddr)) &&
4373 			    m[i].proto == m[j].proto &&
4374 			    m[i].mode == m[j].mode &&
4375 			    m[i].reqid == m[j].reqid &&
4376 			    m[i].old_family == m[j].old_family)
4377 				return -EINVAL;
4378 		}
4379 	}
4380 
4381 	return 0;
4382 }
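/*
 * For illustration: a single entry moving a tunnel from 192.0.2.1 to
 * 198.51.100.1 passes this check, while two entries that agree on
 * old_daddr, old_saddr, proto, mode, reqid and old_family, or an entry
 * whose new addresses are all-zero wildcards, are rejected with
 * -EINVAL.
 */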
4383 
4384 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
4385 		 struct xfrm_migrate *m, int num_migrate,
4386 		 struct xfrm_kmaddress *k, struct net *net,
4387 		 struct xfrm_encap_tmpl *encap)
4388 {
4389 	int i, err, nx_cur = 0, nx_new = 0;
4390 	struct xfrm_policy *pol = NULL;
4391 	struct xfrm_state *x, *xc;
4392 	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
4393 	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
4394 	struct xfrm_migrate *mp;
4395 
4396 	/* Stage 0 - sanity checks */
4397 	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
4398 		goto out;
4399 
4400 	if (dir >= XFRM_POLICY_MAX) {
4401 		err = -EINVAL;
4402 		goto out;
4403 	}
4404 
4405 	/* Stage 1 - find policy */
4406 	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
4407 		err = -ENOENT;
4408 		goto out;
4409 	}
4410 
4411 	/* Stage 2 - find and update state(s) */
4412 	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
4413 		if ((x = xfrm_migrate_state_find(mp, net))) {
4414 			x_cur[nx_cur] = x;
4415 			nx_cur++;
4416 			xc = xfrm_state_migrate(x, mp, encap);
4417 			if (xc) {
4418 				x_new[nx_new] = xc;
4419 				nx_new++;
4420 			} else {
4421 				err = -ENODATA;
4422 				goto restore_state;
4423 			}
4424 		}
4425 	}
4426 
4427 	/* Stage 3 - update policy */
4428 	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
4429 		goto restore_state;
4430 
4431 	/* Stage 4 - delete old state(s) */
4432 	if (nx_cur) {
4433 		xfrm_states_put(x_cur, nx_cur);
4434 		xfrm_states_delete(x_cur, nx_cur);
4435 	}
4436 
4437 	/* Stage 5 - announce */
4438 	km_migrate(sel, dir, type, m, num_migrate, k, encap);
4439 
4440 	xfrm_pol_put(pol);
4441 
4442 	return 0;
4443 out:
4444 	return err;
4445 
4446 restore_state:
4447 	if (pol)
4448 		xfrm_pol_put(pol);
4449 	if (nx_cur)
4450 		xfrm_states_put(x_cur, nx_cur);
4451 	if (nx_new)
4452 		xfrm_states_delete(x_new, nx_new);
4453 
4454 	return err;
4455 }
4456 EXPORT_SYMBOL(xfrm_migrate);
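/*
 * In practice this entry point is driven from user space via the
 * XFRM_MSG_MIGRATE netlink message (xfrm_user.c) or the PF_KEY
 * SADB_X_MIGRATE extension (af_key.c); Mobile IPv6 daemons and IKEv2
 * MOBIKE implementations use it to re-home existing SAs without
 * renegotiating keys.
 */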
4457 #endif
4458