/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 * 	Kazunori MIYAZAWA @USAGI
 * 	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 * 		IPv6 support
 * 	Kazunori MIYAZAWA @USAGI
 * 	YOSHIFUJI Hideaki
 * 		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/ip.h>
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif

#include "xfrm_hash.h"

DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_mutex);

static DEFINE_RWLOCK(xfrm_policy_lock);

static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];

static struct kmem_cache *xfrm_dst_cache __read_mostly;

static HLIST_HEAD(xfrm_policy_gc_list);
static DEFINE_SPINLOCK(xfrm_policy_gc_lock);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
static void xfrm_init_pmtu(struct dst_entry *dst);

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);

static inline int
__xfrm4_selector_match(struct xfrm_selector *sel, struct flowi *fl)
{
	return  addr_match(&fl->fl4_dst, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl->fl4_src, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
		(fl->proto == sel->proto || !sel->proto) &&
		(fl->oif == sel->ifindex || !sel->ifindex);
}

static inline int
__xfrm6_selector_match(struct xfrm_selector *sel, struct flowi *fl)
{
	return  addr_match(&fl->fl6_dst, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl->fl6_src, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl) ^ sel->sport) & sel->sport_mask) &&
		(fl->proto == sel->proto || !sel->proto) &&
		(fl->oif == sel->ifindex || !sel->ifindex);
}

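/* Note on the port checks in the helpers above: !((port ^ sel->port) & mask)
 * is a branch-free way of saying "the flow's port equals the selector's
 * port in every bit covered by the mask".  A mask of 0 makes the selector
 * a wildcard; a mask of 0xffff demands an exact match, e.g. dport =
 * htons(80), dport_mask = htons(0xffff) matches only flows to port 80.
 */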
int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
		    unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return 0;
}

static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
						  xfrm_address_t *saddr,
						  xfrm_address_t *daddr,
						  int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, saddr, daddr);

	xfrm_policy_put_afinfo(afinfo);

	return dst;
}

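/* Note (added, not from the original source): the XFRM_TYPE_*_COADDR flags
 * are set by types that substitute one endpoint address, apparently the
 * Mobile IPv6 route-optimization types, where x->coaddr holds the care-of
 * address.  The route lookup below is then done on the substituted pair,
 * and prev_saddr/prev_daddr are updated so the next hop of the bundle
 * continues from the rewritten addresses.
 */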
static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family)
{
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		saddr = x->coaddr;
		daddr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		saddr = prev_saddr;
		daddr = x->coaddr;
	}

	dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr,  sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr,  sizeof(*prev_daddr));
	}

	return dst;
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}

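/* Per-policy lifetime timer.  A soft limit only notifies the key manager
 * via km_policy_expired() (warn != 0) and re-arms the timer for
 * XFRM_KM_TIMEOUT seconds; a hard limit deletes the policy outright.
 * The timer holds its own reference on the policy, dropped through
 * xfrm_pol_put() each time the handler runs.
 */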
static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy*)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (xp->walk.dead)
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}


/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		atomic_set(&policy->refcnt, 1);
		setup_timer(&policy->timer, xfrm_policy_timer,
				(unsigned long)policy);
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

/* Destroy xfrm_policy: descendant resources must have been released by this point. */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);

	BUG_ON(policy->bundles);

	if (del_timer(&policy->timer))
		BUG();

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}
EXPORT_SYMBOL(xfrm_policy_destroy);

static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
{
	struct dst_entry *dst;

	while ((dst = policy->bundles) != NULL) {
		policy->bundles = dst->next;
		dst_free(dst);
	}

	if (del_timer(&policy->timer))
		atomic_dec(&policy->refcnt);

	if (atomic_read(&policy->refcnt) > 1)
		flow_cache_flush();

	xfrm_pol_put(policy);
}

static void xfrm_policy_gc_task(struct work_struct *work)
{
	struct xfrm_policy *policy;
	struct hlist_node *entry, *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_policy_gc_lock);
	gc_list.first = xfrm_policy_gc_list.first;
	INIT_HLIST_HEAD(&xfrm_policy_gc_list);
	spin_unlock_bh(&xfrm_policy_gc_lock);

	hlist_for_each_entry_safe(policy, entry, tmp, &gc_list, bydst)
		xfrm_policy_gc_kill(policy);
}
static DECLARE_WORK(xfrm_policy_gc_work, xfrm_policy_gc_task);

/* Rule must be locked. Release descendant resources and announce the
 * entry dead. The rule must already be unlinked from the lists.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	int dead;

	write_lock_bh(&policy->lock);
	dead = policy->walk.dead;
	policy->walk.dead = 1;
	write_unlock_bh(&policy->lock);

	if (unlikely(dead)) {
		WARN_ON(1);
		return;
	}

	spin_lock_bh(&xfrm_policy_gc_lock);
	hlist_add_head(&policy->bydst, &xfrm_policy_gc_list);
	spin_unlock_bh(&xfrm_policy_gc_lock);

	schedule_work(&xfrm_policy_gc_work);
}

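/* Policies are hashed twice: by selector into the per-direction "bydst"
 * tables (with a per-direction "inexact" list for selectors whose prefixes
 * are too short to hash meaningfully) and by index into the shared "byidx"
 * table.  Both are grown from a workqueue once the entry count exceeds the
 * hash mask; see xfrm_hash_resize() below.
 */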
static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}

static struct hlist_head *policy_hash_bysel(struct net *net, struct xfrm_selector *sel, unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash = __sel_hash(sel, family, hmask);

	return (hash == hmask + 1 ?
		&net->xfrm.policy_inexact[dir] :
		net->xfrm.policy_bydst[dir].table + hash);
}

static struct hlist_head *policy_hash_direct(struct net *net, xfrm_address_t *daddr, xfrm_address_t *saddr, unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash = __addr_hash(daddr, saddr, family, hmask);

	return net->xfrm.policy_bydst[dir].table + hash;
}

static void xfrm_dst_hash_transfer(struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;

redo:
	hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
		unsigned int h;

		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask);
		if (!entry0) {
			hlist_del(entry);
			hlist_add_head(&pol->bydst, ndsttable+h);
			h0 = h;
		} else {
			if (h != h0)
				continue;
			hlist_del(entry);
			hlist_add_after(entry0, &pol->bydst);
		}
		entry0 = entry;
	}
	if (!hlist_empty(list)) {
		entry0 = NULL;
		goto redo;
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

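/* Double the table: e.g. a mask of 15 (16 buckets) becomes 31 (32 buckets). */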
static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}

static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	int i;

	if (!ndst)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);

	net->xfrm.policy_bydst[dir].table = ndst;
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

void xfrm_spd_getinfo(struct xfrmk_spdinfo *si)
{
	read_lock_bh(&xfrm_policy_lock);
	si->incnt = init_net.xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = init_net.xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = init_net.xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = init_net.xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = init_net.xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = init_net.xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = init_net.xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
	read_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_spd_getinfo);

static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	}
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net, total);

	mutex_unlock(&hash_resize_mutex);
}

/* Generate a new policy index. KAME seems to generate indices ordered by
 * cost, at the price of completely unpredictable rule ordering; that will
 * not work here. */
static u32 xfrm_gen_index(struct net *net, int dir)
{
	static u32 idx_generator;

	for (;;) {
		struct hlist_node *entry;
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		idx = (idx_generator | dir);
		idx_generator += 8;
		if (idx == 0)
			idx = 8;
		list = net->xfrm.policy_byidx + idx_hash(net, idx);
		found = 0;
		hlist_for_each_entry(p, entry, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}

static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}

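/* Insert a policy into its per-direction chain, which is kept sorted by
 * ascending priority value (lower value wins).  A policy with the same
 * type, selector and security context replaces the old one, or fails with
 * -EEXIST if excl is set; afterwards the cached bundles of the policies
 * that now follow it in the chain are flushed.
 */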
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct net *net = xp_net(policy);
	struct xfrm_policy *pol;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct hlist_node *entry, *newpos;
	struct dst_entry *gc_list;

	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
	delpol = NULL;
	newpos = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == policy->type &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl) {
				write_unlock_bh(&xfrm_policy_lock);
				return -EEXIST;
			}
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst;
			continue;
		}
		if (delpol)
			break;
	}
	if (newpos)
		hlist_add_after(newpos, &policy->bydst);
	else
		hlist_add_head(&policy->bydst, chain);
	xfrm_pol_hold(policy);
	net->xfrm.policy_count[dir]++;
	atomic_inc(&flow_cache_genid);
	if (delpol)
		__xfrm_policy_unlink(delpol, dir);
	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
	policy->curlft.add_time = get_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	list_add(&policy->walk.all, &net->xfrm.policy_all);
	write_unlock_bh(&xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);

	read_lock_bh(&xfrm_policy_lock);
	gc_list = NULL;
	entry = &policy->bydst;
	hlist_for_each_entry_continue(policy, entry, bydst) {
		struct dst_entry *dst;

		write_lock(&policy->lock);
		dst = policy->bundles;
		if (dst) {
			struct dst_entry *tail = dst;
			while (tail->next)
				tail = tail->next;
			tail->next = gc_list;
			gc_list = dst;

			policy->bundles = NULL;
		}
		write_unlock(&policy->lock);
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		struct dst_entry *dst = gc_list;

		gc_list = dst->next;
		dst_free(dst);
	}

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u8 type, int dir,
					  struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete,
					  int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(net, sel, sel->family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == type &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security)) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(ret);
	}
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(struct net *net, u8 type, int dir, u32 id,
				     int delete, int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = -ENOENT;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = net->xfrm.policy_byidx + idx_hash(net, id);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, byidx) {
		if (pol->type == type && pol->index == id) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(ret);
	}
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i;

		hlist_for_each_entry(pol, entry,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			err = security_xfrm_policy_delete(pol->security);
			if (err) {
				xfrm_audit_policy_delete(pol, 0,
							 audit_info->loginuid,
							 audit_info->sessionid,
							 audit_info->secid);
				return err;
			}
		}
		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				err = security_xfrm_policy_delete(
								pol->security);
				if (err) {
					xfrm_audit_policy_delete(pol, 0,
							audit_info->loginuid,
							audit_info->sessionid,
							audit_info->secid);
					return err;
				}
			}
		}
	}
	return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif

int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0;

	write_lock_bh(&xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(net, type, audit_info);
	if (err)
		goto out;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i;

	again1:
		hlist_for_each_entry(pol, entry,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			__xfrm_policy_unlink(pol, dir);
			write_unlock_bh(&xfrm_policy_lock);

			xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
						 audit_info->sessionid,
						 audit_info->secid);

			xfrm_policy_kill(pol);

			write_lock_bh(&xfrm_policy_lock);
			goto again1;
		}

		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
	again2:
			hlist_for_each_entry(pol, entry,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				__xfrm_policy_unlink(pol, dir);
				write_unlock_bh(&xfrm_policy_lock);

				xfrm_audit_policy_delete(pol, 1,
							 audit_info->loginuid,
							 audit_info->sessionid,
							 audit_info->secid);
				xfrm_policy_kill(pol);

				write_lock_bh(&xfrm_policy_lock);
				goto again2;
			}
		}

	}
	atomic_inc(&flow_cache_genid);
out:
	write_unlock_bh(&xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);

int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
		     int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol;
	struct xfrm_policy_walk_entry *x;
	int error = 0;

	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
	    walk->type != XFRM_POLICY_TYPE_ANY)
		return -EINVAL;

	if (list_empty(&walk->walk.all) && walk->seq != 0)
		return 0;

	write_lock_bh(&xfrm_policy_lock);
	if (list_empty(&walk->walk.all))
		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
	else
		x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
		if (x->dead)
			continue;
		pol = container_of(x, struct xfrm_policy, walk);
		if (walk->type != XFRM_POLICY_TYPE_ANY &&
		    walk->type != pol->type)
			continue;
		error = func(pol, xfrm_policy_id2dir(pol->index),
			     walk->seq, data);
		if (error) {
			list_move_tail(&walk->walk.all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		error = -ENOENT;
		goto out;
	}
	list_del_init(&walk->walk.all);
out:
	write_unlock_bh(&xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
{
	INIT_LIST_HEAD(&walk->walk.all);
	walk->walk.dead = 1;
	walk->type = type;
	walk->seq = 0;
}
EXPORT_SYMBOL(xfrm_policy_walk_init);

void xfrm_policy_walk_done(struct xfrm_policy_walk *walk)
{
	if (list_empty(&walk->walk.all))
		return;

	write_lock_bh(&xfrm_policy_lock);
	list_del(&walk->walk.all);
	write_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);

/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
static int xfrm_policy_match(struct xfrm_policy *pol, struct flowi *fl,
			     u8 type, u16 family, int dir)
{
	struct xfrm_selector *sel = &pol->selector;
	int match, ret = -ESRCH;

	if (pol->family != family ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol->security, fl->secid,
						  dir);

	return ret;
}

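/* Look up a policy by flow.  Two passes are made under the read lock:
 * first the exact hash chain for the flow's address pair, then the
 * inexact list, where an entry only wins if its priority value is
 * strictly lower (i.e. higher precedence) than the best exact match.
 */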
static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
						     struct flowi *fl,
						     u16 family, u8 dir)
{
	int err;
	struct xfrm_policy *pol, *ret;
	xfrm_address_t *daddr, *saddr;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))
		return NULL;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(net, daddr, saddr, family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else if (pol->priority < priority) {
			ret = pol;
			break;
		}
	}
	if (ret)
		xfrm_pol_hold(ret);
fail:
	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}

static int xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family,
			      u8 dir, void **objp, atomic_t **obj_refp)
{
	struct xfrm_policy *pol;
	int err = 0;

#ifdef CONFIG_XFRM_SUB_POLICY
	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
	if (IS_ERR(pol)) {
		err = PTR_ERR(pol);
		pol = NULL;
	}
	if (pol || err)
		goto end;
#endif
	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
	if (IS_ERR(pol)) {
		err = PTR_ERR(pol);
		pol = NULL;
	}
#ifdef CONFIG_XFRM_SUB_POLICY
end:
#endif
	if ((*objp = (void *) pol) != NULL)
		*obj_refp = &pol->refcnt;
	return err;
}

static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;
	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	}
}

static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		int match = xfrm_selector_match(&pol->selector, fl,
						sk->sk_family);
		int err = 0;

		if (match) {
			err = security_xfrm_policy_lookup(pol->security,
						      fl->secid,
						      policy_to_flow_dir(dir));
			if (!err)
				xfrm_pol_hold(pol);
			else if (err == -ESRCH)
				pol = NULL;
			else
				pol = ERR_PTR(err);
		} else
			pol = NULL;
	}
	read_unlock_bh(&xfrm_policy_lock);
	return pol;
}

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	struct net *net = xp_net(pol);
	struct hlist_head *chain = policy_hash_bysel(net, &pol->selector,
						     pol->family, dir);

	list_add(&pol->walk.all, &net->xfrm.policy_all);
	hlist_add_head(&pol->bydst, chain);
	hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index));
	net->xfrm.policy_count[dir]++;
	xfrm_pol_hold(pol);

	if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	struct net *net = xp_net(pol);

	if (hlist_unhashed(&pol->bydst))
		return NULL;

	hlist_del(&pol->bydst);
	hlist_del(&pol->byidx);
	list_del(&pol->walk.all);
	net->xfrm.policy_count[dir]--;

	return pol;
}

int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	write_lock_bh(&xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);
	if (pol) {
		if (dir < XFRM_POLICY_MAX)
			atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);

int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct net *net = xp_net(pol);
	struct xfrm_policy *old_pol;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
		return -EINVAL;
#endif

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
	if (pol) {
		pol->curlft.add_time = get_seconds();
		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir);
		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
	}
	if (old_pol)
		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
	write_unlock_bh(&xfrm_policy_lock);

	if (old_pol) {
		xfrm_policy_kill(old_pol);
	}
	return 0;
}

static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);

	if (newp) {
		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old->security,
					       &newp->security)) {
			kfree(newp);
			return NULL;  /* ENOMEM */
		}
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		newp->type = old->type;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);
		xfrm_pol_put(newp);
	}
	return newp;
}

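/* Duplicate the per-socket policies onto a new socket, presumably when a
 * socket is cloned (e.g. on accept()), so the child ends up with its own
 * linked copies instead of sharing the parent's.
 */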
int __xfrm_sk_clone_policy(struct sock *sk)
{
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
		return -ENOMEM;
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
		return -ENOMEM;
	return 0;
}

static int
xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
	       unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->get_saddr(net, local, remote);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}

/* Resolve list of templates for the flow, given policy. */
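/* For tunnel and BEET mode templates the outer addresses come from the
 * template itself rather than from the flow, and an unspecified local
 * address is filled in via xfrm_get_saddr().  Optional templates that
 * cannot be resolved are skipped; a mandatory one that fails aborts the
 * whole resolution.
 */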

static int
xfrm_tmpl_resolve_one(struct xfrm_policy *policy, struct flowi *fl,
		      struct xfrm_state **xfrm,
		      unsigned short family)
{
	struct net *net = xp_net(policy);
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
	xfrm_address_t tmp;

	for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode == XFRM_MODE_TUNNEL ||
		    tmpl->mode == XFRM_MODE_BEET) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
			family = tmpl->encap_family;
			if (xfrm_addr_any(local, family)) {
				error = xfrm_get_saddr(net, &tmp, remote, family);
				if (error)
					goto fail;
				local = &tmp;
			}
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		}
		else if (error == -ESRCH)
			error = -EAGAIN;

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx>=0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}

static int
xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
		  struct xfrm_state **xfrm,
		  unsigned short family)
{
	struct xfrm_state *tp[XFRM_MAX_DEPTH];
	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
	int cnx = 0;
	int error;
	int ret;
	int i;

	for (i = 0; i < npols; i++) {
		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
			error = -ENOBUFS;
			goto fail;
		}

		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
		if (ret < 0) {
			error = ret;
			goto fail;
		} else
			cnx += ret;
	}

	/* found states are sorted for outbound processing */
	if (npols > 1)
		xfrm_state_sort(xfrm, tpp, cnx, family);

	return cnx;

 fail:
	for (cnx--; cnx>=0; cnx--)
		xfrm_state_put(tpp[cnx]);
	return error;

}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static struct dst_entry *
xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
{
	struct dst_entry *x;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EINVAL);
	x = afinfo->find_bundle(fl, policy);
	xfrm_policy_put_afinfo(afinfo);
	return x;
}

static inline int xfrm_get_tos(struct flowi *fl, int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int tos;

	if (!afinfo)
		return -EINVAL;

	tos = afinfo->get_tos(fl);

	xfrm_policy_put_afinfo(afinfo);

	return tos;
}

static inline struct xfrm_dst *xfrm_alloc_dst(int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct xfrm_dst *xdst;

	if (!afinfo)
		return ERR_PTR(-EINVAL);

	xdst = dst_alloc(afinfo->dst_ops) ?: ERR_PTR(-ENOBUFS);

	xfrm_policy_put_afinfo(afinfo);

	return xdst;
}

static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
				 int nfheader_len)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(dst->ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->init_path(path, dst, nfheader_len);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}

static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->fill_dst(xdst, dev);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}

/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... Shortly, bundle a bundle.
 */
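
/* Rough sketch of the result (added for illustration): for a two-state
 * bundle the chain built below looks like
 *
 *	dst0 (xfrm dst for xfrm[0]) --child--> dst1 (xfrm dst for xfrm[1])
 *	--child--> dst (the raw route), with dst0->path pointing at dst and
 *	each xdst->route at the route used to reach that hop.
 */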

static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
					    struct xfrm_state **xfrm, int nx,
					    struct flowi *fl,
					    struct dst_entry *dst)
{
	unsigned long now = jiffies;
	struct net_device *dev;
	struct dst_entry *dst_prev = NULL;
	struct dst_entry *dst0 = NULL;
	int i = 0;
	int err;
	int header_len = 0;
	int nfheader_len = 0;
	int trailer_len = 0;
	int tos;
	int family = policy->selector.family;
	xfrm_address_t saddr, daddr;

	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);

	tos = xfrm_get_tos(fl, family);
	err = tos;
	if (tos < 0)
		goto put_states;

	dst_hold(dst);

	for (; i < nx; i++) {
		struct xfrm_dst *xdst = xfrm_alloc_dst(family);
		struct dst_entry *dst1 = &xdst->u.dst;

		err = PTR_ERR(xdst);
		if (IS_ERR(xdst)) {
			dst_release(dst);
			goto put_states;
		}

		if (!dst_prev)
			dst0 = dst1;
		else {
			dst_prev->child = dst_clone(dst1);
			dst1->flags |= DST_NOHASH;
		}

		xdst->route = dst;
		memcpy(&dst1->metrics, &dst->metrics, sizeof(dst->metrics));

		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
			family = xfrm[i]->props.family;
			dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
					      family);
			err = PTR_ERR(dst);
			if (IS_ERR(dst))
				goto put_states;
		} else
			dst_hold(dst);

		dst1->xfrm = xfrm[i];
		xdst->genid = xfrm[i]->genid;

		dst1->obsolete = -1;
		dst1->flags |= DST_HOST;
		dst1->lastuse = now;

		dst1->input = dst_discard;
		dst1->output = xfrm[i]->outer_mode->afinfo->output;

		dst1->next = dst_prev;
		dst_prev = dst1;

		header_len += xfrm[i]->props.header_len;
		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
			nfheader_len += xfrm[i]->props.header_len;
		trailer_len += xfrm[i]->props.trailer_len;
	}

	dst_prev->child = dst;
	dst0->path = dst;

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	/* Copy neighbour for reachability confirmation */
	dst0->neighbour = neigh_clone(dst->neighbour);

	xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
	xfrm_init_pmtu(dst_prev);

	for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;

		err = xfrm_fill_dst(xdst, dev);
		if (err)
			goto free_dst;

		dst_prev->header_len = header_len;
		dst_prev->trailer_len = trailer_len;
		header_len -= xdst->u.dst.xfrm->props.header_len;
		trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
	}

out:
	return dst0;

put_states:
	for (; i < nx; i++)
		xfrm_state_put(xfrm[i]);
free_dst:
	if (dst0)
		dst_free(dst0);
	dst0 = ERR_PTR(err);
	goto out;
}

static int inline
xfrm_dst_alloc_copy(void **target, void *src, int size)
{
	if (!*target) {
		*target = kmalloc(size, GFP_ATOMIC);
		if (!*target)
			return -ENOMEM;
	}
	memcpy(*target, src, size);
	return 0;
}

static int inline
xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
				   sel, sizeof(*sel));
#else
	return 0;
#endif
}

static int inline
xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
#else
	return 0;
#endif
}

static int stale_bundle(struct dst_entry *dst);

/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
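/* Return convention: 0 with *dst_p pointing at the bundle on success, 0
 * with *dst_p untouched when the flow may pass untransformed, a negative
 * errno otherwise.  -EREMOTE is special: it asks the caller to install a
 * one-shot blackhole route while larval states resolve, and is translated
 * back to -EAGAIN by the xfrm_lookup() wrapper below.
 */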
int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
		  struct sock *sk, int flags)
{
	struct xfrm_policy *policy;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols;
	int pol_dead;
	int xfrm_nr;
	int pi;
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst, *dst_orig = *dst_p;
	int nx = 0;
	int err;
	u32 genid;
	u16 family;
	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);

restart:
	genid = atomic_read(&flow_cache_genid);
	policy = NULL;
	for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
		pols[pi] = NULL;
	npols = 0;
	pol_dead = 0;
	xfrm_nr = 0;

	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
		policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
		err = PTR_ERR(policy);
		if (IS_ERR(policy)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
			goto dropdst;
		}
	}

	if (!policy) {
		/* To accelerate a bit...  */
		if ((dst_orig->flags & DST_NOXFRM) ||
		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
			goto nopol;

		policy = flow_cache_lookup(net, fl, dst_orig->ops->family,
					   dir, xfrm_policy_lookup);
		err = PTR_ERR(policy);
		if (IS_ERR(policy)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
			goto dropdst;
		}
	}

	if (!policy)
		goto nopol;

	family = dst_orig->ops->family;
	pols[0] = policy;
	npols ++;
	xfrm_nr += pols[0]->xfrm_nr;

	err = -ENOENT;
	if ((flags & XFRM_LOOKUP_ICMP) && !(policy->flags & XFRM_POLICY_ICMP))
		goto error;

	policy->curlft.use_time = get_seconds();

	switch (policy->action) {
	default:
	case XFRM_POLICY_BLOCK:
		/* Prohibit the flow */
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
		err = -EPERM;
		goto error;

	case XFRM_POLICY_ALLOW:
#ifndef CONFIG_XFRM_SUB_POLICY
		if (policy->xfrm_nr == 0) {
			/* Flow passes not transformed. */
			xfrm_pol_put(policy);
			return 0;
		}
#endif

		/* Try to find matching bundle.
		 *
		 * LATER: help from flow cache. It is optional, this
		 * is required only for output policy.
		 */
		dst = xfrm_find_bundle(fl, policy, family);
		if (IS_ERR(dst)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
			err = PTR_ERR(dst);
			goto error;
		}

		if (dst)
			break;

#ifdef CONFIG_XFRM_SUB_POLICY
		if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
			pols[1] = xfrm_policy_lookup_bytype(net,
							    XFRM_POLICY_TYPE_MAIN,
							    fl, family,
							    XFRM_POLICY_OUT);
			if (pols[1]) {
				if (IS_ERR(pols[1])) {
					XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
					err = PTR_ERR(pols[1]);
					goto error;
				}
				if (pols[1]->action == XFRM_POLICY_BLOCK) {
					XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
					err = -EPERM;
					goto error;
				}
				npols ++;
				xfrm_nr += pols[1]->xfrm_nr;
			}
		}

		/*
		 * Neither flowi nor bundle information knows about the
		 * transformation template size, so when more than one
		 * policy is in use we can only tell whether all of them
		 * are bypass after every policy has been searched.  Note
		 * that, as above, the not-transformed bypass is handled
		 * separately in the non-sub-policy configuration.
		 */
		if (xfrm_nr == 0) {
			/* Flow passes not transformed. */
			xfrm_pols_put(pols, npols);
			return 0;
		}

#endif
		nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);

		if (unlikely(nx<0)) {
			err = nx;
			if (err == -EAGAIN && net->xfrm.sysctl_larval_drop) {
				/* EREMOTE tells the caller to generate
				 * a one-shot blackhole route.
				 */
				XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
				xfrm_pol_put(policy);
				return -EREMOTE;
			}
			if (err == -EAGAIN && (flags & XFRM_LOOKUP_WAIT)) {
				DECLARE_WAITQUEUE(wait, current);

				add_wait_queue(&net->xfrm.km_waitq, &wait);
				set_current_state(TASK_INTERRUPTIBLE);
				schedule();
				set_current_state(TASK_RUNNING);
				remove_wait_queue(&net->xfrm.km_waitq, &wait);

				nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);

				if (nx == -EAGAIN && signal_pending(current)) {
					XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
					err = -ERESTART;
					goto error;
				}
				if (nx == -EAGAIN ||
				    genid != atomic_read(&flow_cache_genid)) {
					xfrm_pols_put(pols, npols);
					goto restart;
				}
				err = nx;
			}
			if (err < 0) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
				goto error;
			}
		}
		if (nx == 0) {
			/* Flow passes not transformed. */
			xfrm_pols_put(pols, npols);
			return 0;
		}

		dst = xfrm_bundle_create(policy, xfrm, nx, fl, dst_orig);
		err = PTR_ERR(dst);
		if (IS_ERR(dst)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
			goto error;
		}

		for (pi = 0; pi < npols; pi++) {
			read_lock_bh(&pols[pi]->lock);
			pol_dead |= pols[pi]->walk.dead;
			read_unlock_bh(&pols[pi]->lock);
		}

		write_lock_bh(&policy->lock);
		if (unlikely(pol_dead || stale_bundle(dst))) {
			/* Wow! While we worked on resolving, this
			 * policy has gone. Retry. It is not paranoia,
			 * we just cannot enlist new bundle to dead object.
			 * We can't enlist stale bundles either.
			 */
			write_unlock_bh(&policy->lock);
			dst_free(dst);

			if (pol_dead)
				XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLDEAD);
			else
				XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
			err = -EHOSTUNREACH;
			goto error;
		}

		if (npols > 1)
			err = xfrm_dst_update_parent(dst, &pols[1]->selector);
		else
			err = xfrm_dst_update_origin(dst, fl);
		if (unlikely(err)) {
			write_unlock_bh(&policy->lock);
			dst_free(dst);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
			goto error;
		}

		dst->next = policy->bundles;
		policy->bundles = dst;
		dst_hold(dst);
		write_unlock_bh(&policy->lock);
	}
	*dst_p = dst;
	dst_release(dst_orig);
	xfrm_pols_put(pols, npols);
	return 0;

error:
	xfrm_pols_put(pols, npols);
dropdst:
	dst_release(dst_orig);
	*dst_p = NULL;
	return err;

nopol:
	err = -ENOENT;
	if (flags & XFRM_LOOKUP_ICMP)
		goto dropdst;
	return 0;
}
EXPORT_SYMBOL(__xfrm_lookup);

int xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
		struct sock *sk, int flags)
{
	int err = __xfrm_lookup(net, dst_p, fl, sk, flags);

	if (err == -EREMOTE) {
		dst_release(*dst_p);
		*dst_p = NULL;
		err = -EAGAIN;
	}

	return err;
}
EXPORT_SYMBOL(xfrm_lookup);

static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, struct flowi *fl)
{
	struct xfrm_state *x;

	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
		return 0;
	x = skb->sp->xvec[idx];
	if (!x->type->reject)
		return 0;
	return x->type->reject(x, skb, fl);
}

/* When the skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we do this in a maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have their policy cached on them.
 */

static inline int
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
		!(x->props.mode != XFRM_MODE_TRANSPORT &&
		  xfrm_state_addr_cmp(tmpl, x, family));
}

/*
 * 0 or more than 0 is returned when validation succeeds (either a bypass
 * because of an optional transport mode, or the next index of the matched
 * secpath state with the template).
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
static inline int
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			if (start == -1)
				start = -2-idx;
			break;
		}
	}
	return start;
}

int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
			  unsigned int family, int reverse)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl, reverse);
	err = security_xfrm_decode_session(skb, &fl->secid);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(__xfrm_decode_session);

static inline int secpath_has_nontransport(struct sec_path *sp, int k, int *idxp)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
			*idxp = k;
			return 1;
		}
	}

	return 0;
}

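/* Inbound policy check, run once the packet is back in its native form:
 * decode the flow, verify each SA in the secpath against its own selector,
 * find the applicable policy (socket policy first, then the main/sub
 * lookup), and finally match the accumulated templates against the secpath
 * via xfrm_policy_ok().  Returns 1 if the packet may pass, 0 to drop it.
 */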
1875 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
1876 			unsigned short family)
1877 {
1878 	struct net *net = dev_net(skb->dev);
1879 	struct xfrm_policy *pol;
1880 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
1881 	int npols = 0;
1882 	int xfrm_nr;
1883 	int pi;
1884 	int reverse;
1885 	struct flowi fl;
1886 	u8 fl_dir;
1887 	int xerr_idx = -1;
1888 
1889 	reverse = dir & ~XFRM_POLICY_MASK;
1890 	dir &= XFRM_POLICY_MASK;
1891 	fl_dir = policy_to_flow_dir(dir);
1892 
1893 	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
1894 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
1895 		return 0;
1896 	}
1897 
1898 	nf_nat_decode_session(skb, &fl, family);
1899 
1900 	/* First, check used SA against their selectors. */
1901 	if (skb->sp) {
1902 		int i;
1903 
1904 		for (i=skb->sp->len-1; i>=0; i--) {
1905 			struct xfrm_state *x = skb->sp->xvec[i];
1906 			if (!xfrm_selector_match(&x->sel, &fl, family)) {
1907 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
1908 				return 0;
1909 			}
1910 		}
1911 	}
1912 
1913 	pol = NULL;
1914 	if (sk && sk->sk_policy[dir]) {
1915 		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
1916 		if (IS_ERR(pol)) {
1917 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
1918 			return 0;
1919 		}
1920 	}
1921 
1922 	if (!pol)
1923 		pol = flow_cache_lookup(net, &fl, family, fl_dir,
1924 					xfrm_policy_lookup);
1925 
1926 	if (IS_ERR(pol)) {
1927 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
1928 		return 0;
1929 	}
1930 
1931 	if (!pol) {
1932 		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
1933 			xfrm_secpath_reject(xerr_idx, skb, &fl);
1934 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
1935 			return 0;
1936 		}
1937 		return 1;
1938 	}
1939 
1940 	pol->curlft.use_time = get_seconds();
1941 
1942 	pols[0] = pol;
1943 	npols ++;
1944 #ifdef CONFIG_XFRM_SUB_POLICY
1945 	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
1946 		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
1947 						    &fl, family,
1948 						    XFRM_POLICY_IN);
1949 		if (pols[1]) {
1950 			if (IS_ERR(pols[1])) {
1951 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
1952 				return 0;
1953 			}
1954 			pols[1]->curlft.use_time = get_seconds();
1955 			npols ++;
1956 		}
1957 	}
1958 #endif
1959 
1960 	if (pol->action == XFRM_POLICY_ALLOW) {
1961 		struct sec_path *sp;
1962 		static struct sec_path dummy;
1963 		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
1964 		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
1965 		struct xfrm_tmpl **tpp = tp;
1966 		int ti = 0;
1967 		int i, k;
1968 
1969 		if ((sp = skb->sp) == NULL)
1970 			sp = &dummy;
1971 
1972 		for (pi = 0; pi < npols; pi++) {
1973 			if (pols[pi] != pol &&
1974 			    pols[pi]->action != XFRM_POLICY_ALLOW) {
1975 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
1976 				goto reject;
1977 			}
1978 			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
1979 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
1980 				goto reject_error;
1981 			}
1982 			for (i = 0; i < pols[pi]->xfrm_nr; i++)
1983 				tpp[ti++] = &pols[pi]->xfrm_vec[i];
1984 		}
1985 		xfrm_nr = ti;
1986 		if (npols > 1) {
1987 			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
1988 			tpp = stp;
1989 		}
1990 
1991 		/* For each tunnel xfrm, find the first matching tmpl.
1992 		 * For each tmpl before that, find corresponding xfrm.
1993 		 * Order is _important_. Later we will implement
1994 		 * some barriers, but at the moment barriers
1995 		 * are implied between each two transformations.
1996 		 */
1997 		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
1998 			k = xfrm_policy_ok(tpp[i], sp, k, family);
1999 			if (k < 0) {
2000 				if (k < -1)
2001 					/* "-2 - errored_index" returned */
2002 					xerr_idx = -(2+k);
2003 				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2004 				goto reject;
2005 			}
2006 		}
2007 
2008 		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
2009 			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
2010 			goto reject;
2011 		}
2012 
2013 		xfrm_pols_put(pols, npols);
2014 		return 1;
2015 	}
2016 	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
2017 
2018 reject:
2019 	xfrm_secpath_reject(xerr_idx, skb, &fl);
2020 reject_error:
2021 	xfrm_pols_put(pols, npols);
2022 	return 0;
2023 }
2024 EXPORT_SYMBOL(__xfrm_policy_check);
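
/* Usage sketch (illustrative): protocol input paths normally reach
 * this through the inline wrappers in include/net/xfrm.h, roughly:
 *
 *	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
 *		goto drop;
 *
 * A zero return means the packet failed policy and must be dropped.
 */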
2025 
2026 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
2027 {
2028 	struct net *net = dev_net(skb->dev);
2029 	struct flowi fl;
2030 	struct dst_entry *dst;
2031 	int res;
2032 
2033 	if (xfrm_decode_session(skb, &fl, family) < 0) {
2034 		/* XXX: we should have something like FWDHDRERROR here. */
2035 		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
2036 		return 0;
2037 	}
2038 
2039 	dst = skb_dst(skb);
2040 
2041 	res = xfrm_lookup(net, &dst, &fl, NULL, 0) == 0;
2042 	skb_dst_set(skb, dst);
2043 	return res;
2044 }
2045 EXPORT_SYMBOL(__xfrm_route_forward);
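
/* Usage sketch (illustrative): the forwarding path invokes this via
 * the inline wrappers, e.g.
 *
 *	if (!xfrm4_route_forward(skb))
 *		goto drop;
 *
 * replacing the skb's dst with an XFRM bundle when policy requires
 * transformation of forwarded traffic.
 */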
2046 
2047 /* Optimize later using cookies and generation ids. */
2048 
2049 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
2050 {
2051 	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
2052 	 * to "-1" to force all XFRM destinations to get validated by
2053 	 * dst_ops->check on every use.  We do this because when a
2054 	 * normal route referenced by an XFRM dst is obsoleted we do
2055 	 * not go looking around for all parent referencing XFRM dsts
2056 	 * so that we can invalidate them.  It is just too much work.
2057 	 * Instead we make the checks here on every use.  For example:
2058 	 *
2059 	 *	XFRM dst A --> IPv4 dst X
2060 	 *
2061 	 * X is the "xdst->route" of A (X is also the "dst->path" of A
2062 	 * in this example).  If X is marked obsolete, "A" will not
2063 	 * notice.  That's what we are validating here via the
2064 	 * stale_bundle() check.
2065 	 *
2066 	 * When a policy's bundle is pruned, we dst_free() the XFRM
2067 	 * dst, which causes its ->obsolete field to be set to a
2068 	 * positive integer.  If an XFRM dst has been pruned
2069 	 * like this, we want to force a new route lookup.
2070 	 */
2071 	if (dst->obsolete < 0 && !stale_bundle(dst))
2072 		return dst;
2073 
2074 	return NULL;
2075 }
2076 
2077 static int stale_bundle(struct dst_entry *dst)
2078 {
2079 	return !xfrm_bundle_ok(NULL, (struct xfrm_dst *)dst, NULL, AF_UNSPEC, 0);
2080 }
2081 
2082 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
2083 {
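	/* Re-point any xfrm child dsts that still reference the
	 * departing device at the namespace loopback device, so the
	 * device's refcount can drop and unregistration can finish.
	 */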
2084 	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
2085 		dst->dev = dev_net(dev)->loopback_dev;
2086 		dev_hold(dst->dev);
2087 		dev_put(dev);
2088 	}
2089 }
2090 EXPORT_SYMBOL(xfrm_dst_ifdown);
2091 
2092 static void xfrm_link_failure(struct sk_buff *skb)
2093 {
2094 	/* Impossible. Such a dst must be popped before it reaches the point of failure. */
2095 	return;
2096 }
2097 
2098 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
2099 {
2100 	if (dst) {
2101 		if (dst->obsolete) {
2102 			dst_release(dst);
2103 			dst = NULL;
2104 		}
2105 	}
2106 	return dst;
2107 }
2108 
2109 static void prune_one_bundle(struct xfrm_policy *pol, int (*func)(struct dst_entry *), struct dst_entry **gc_list_p)
2110 {
2111 	struct dst_entry *dst, **dstp;
2112 
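	/* Walk the policy's bundle list with a pointer-to-pointer so
	 * matching entries can be unlinked in place; they are moved to
	 * the caller's gc list and freed outside this lock.
	 */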
2113 	write_lock(&pol->lock);
2114 	dstp = &pol->bundles;
2115 	while ((dst = *dstp) != NULL) {
2116 		if (func(dst)) {
2117 			*dstp = dst->next;
2118 			dst->next = *gc_list_p;
2119 			*gc_list_p = dst;
2120 		} else {
2121 			dstp = &dst->next;
2122 		}
2123 	}
2124 	write_unlock(&pol->lock);
2125 }
2126 
2127 static void xfrm_prune_bundles(struct net *net, int (*func)(struct dst_entry *))
2128 {
2129 	struct dst_entry *gc_list = NULL;
2130 	int dir;
2131 
2132 	read_lock_bh(&xfrm_policy_lock);
2133 	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
2134 		struct xfrm_policy *pol;
2135 		struct hlist_node *entry;
2136 		struct hlist_head *table;
2137 		int i;
2138 
2139 		hlist_for_each_entry(pol, entry,
2140 				     &net->xfrm.policy_inexact[dir], bydst)
2141 			prune_one_bundle(pol, func, &gc_list);
2142 
2143 		table = net->xfrm.policy_bydst[dir].table;
2144 		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
2145 			hlist_for_each_entry(pol, entry, table + i, bydst)
2146 				prune_one_bundle(pol, func, &gc_list);
2147 		}
2148 	}
2149 	read_unlock_bh(&xfrm_policy_lock);
2150 
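	/* Free the bundles collected above only after the policy lock
	 * has been dropped, so dst destruction does not run under the
	 * global lock.
	 */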
2151 	while (gc_list) {
2152 		struct dst_entry *dst = gc_list;
2153 		gc_list = dst->next;
2154 		dst_free(dst);
2155 	}
2156 }
2157 
2158 static int unused_bundle(struct dst_entry *dst)
2159 {
2160 	return !atomic_read(&dst->__refcnt);
2161 }
2162 
2163 static void __xfrm_garbage_collect(struct net *net)
2164 {
2165 	xfrm_prune_bundles(net, unused_bundle);
2166 }
2167 
2168 static int xfrm_flush_bundles(struct net *net)
2169 {
2170 	xfrm_prune_bundles(net, stale_bundle);
2171 	return 0;
2172 }
2173 
2174 static void xfrm_init_pmtu(struct dst_entry *dst)
2175 {
2176 	do {
2177 		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2178 		u32 pmtu, route_mtu_cached;
2179 
2180 		pmtu = dst_mtu(dst->child);
2181 		xdst->child_mtu_cached = pmtu;
2182 
2183 		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
2184 
2185 		route_mtu_cached = dst_mtu(xdst->route);
2186 		xdst->route_mtu_cached = route_mtu_cached;
2187 
2188 		if (pmtu > route_mtu_cached)
2189 			pmtu = route_mtu_cached;
2190 
2191 		dst->metrics[RTAX_MTU-1] = pmtu;
2192 	} while ((dst = dst->next));
2193 }
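
/* Worked example (illustrative): for a single ESP tunnel, the child
 * route might report an MTU of 1500; xfrm_state_mtu() subtracts the
 * transform overhead (outer headers, IV, padding, ICV), leaving
 * e.g. ~1440, which is then clamped to the cached route MTU.
 */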
2194 
2195 /* Check that the bundle accepts the flow and its components are
2196  * still valid.
2197  */
2198 
2199 int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
2200 		struct flowi *fl, int family, int strict)
2201 {
2202 	struct dst_entry *dst = &first->u.dst;
2203 	struct xfrm_dst *last;
2204 	u32 mtu;
2205 
2206 	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
2207 	    (dst->dev && !netif_running(dst->dev)))
2208 		return 0;
2209 #ifdef CONFIG_XFRM_SUB_POLICY
2210 	if (fl) {
2211 		if (first->origin && !flow_cache_uli_match(first->origin, fl))
2212 			return 0;
2213 		if (first->partner &&
2214 		    !xfrm_selector_match(first->partner, fl, family))
2215 			return 0;
2216 	}
2217 #endif
2218 
2219 	last = NULL;
2220 
2221 	do {
2222 		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
2223 
2224 		if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
2225 			return 0;
2226 		if (fl && pol &&
2227 		    !security_xfrm_state_pol_flow_match(dst->xfrm, pol, fl))
2228 			return 0;
2229 		if (dst->xfrm->km.state != XFRM_STATE_VALID)
2230 			return 0;
2231 		if (xdst->genid != dst->xfrm->genid)
2232 			return 0;
2233 
2234 		if (strict && fl &&
2235 		    !(dst->xfrm->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
2236 		    !xfrm_state_addr_flow_check(dst->xfrm, fl, family))
2237 			return 0;
2238 
2239 		mtu = dst_mtu(dst->child);
2240 		if (xdst->child_mtu_cached != mtu) {
2241 			last = xdst;
2242 			xdst->child_mtu_cached = mtu;
2243 		}
2244 
2245 		if (!dst_check(xdst->route, xdst->route_cookie))
2246 			return 0;
2247 		mtu = dst_mtu(xdst->route);
2248 		if (xdst->route_mtu_cached != mtu) {
2249 			last = xdst;
2250 			xdst->route_mtu_cached = mtu;
2251 		}
2252 
2253 		dst = dst->child;
2254 	} while (dst->xfrm);
2255 
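	/* A cached MTU changed somewhere in the chain: re-derive the
	 * effective MTU from the deepest changed entry and propagate
	 * it back through the bundle, clamping at each level's route
	 * MTU.
	 */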
2256 	if (likely(!last))
2257 		return 1;
2258 
2259 	mtu = last->child_mtu_cached;
2260 	for (;;) {
2261 		dst = &last->u.dst;
2262 
2263 		mtu = xfrm_state_mtu(dst->xfrm, mtu);
2264 		if (mtu > last->route_mtu_cached)
2265 			mtu = last->route_mtu_cached;
2266 		dst->metrics[RTAX_MTU-1] = mtu;
2267 
2268 		if (last == first)
2269 			break;
2270 
2271 		last = (struct xfrm_dst *)last->u.dst.next;
2272 		last->child_mtu_cached = mtu;
2273 	}
2274 
2275 	return 1;
2276 }
2277 
2278 EXPORT_SYMBOL(xfrm_bundle_ok);
2279 
2280 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
2281 {
2282 	int err = 0;
2283 	if (unlikely(afinfo == NULL))
2284 		return -EINVAL;
2285 	if (unlikely(afinfo->family >= NPROTO))
2286 		return -EAFNOSUPPORT;
2287 	write_lock_bh(&xfrm_policy_afinfo_lock);
2288 	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
2289 		err = -ENOBUFS;
2290 	else {
2291 		struct dst_ops *dst_ops = afinfo->dst_ops;
2292 		if (likely(dst_ops->kmem_cachep == NULL))
2293 			dst_ops->kmem_cachep = xfrm_dst_cache;
2294 		if (likely(dst_ops->check == NULL))
2295 			dst_ops->check = xfrm_dst_check;
2296 		if (likely(dst_ops->negative_advice == NULL))
2297 			dst_ops->negative_advice = xfrm_negative_advice;
2298 		if (likely(dst_ops->link_failure == NULL))
2299 			dst_ops->link_failure = xfrm_link_failure;
2300 		if (likely(afinfo->garbage_collect == NULL))
2301 			afinfo->garbage_collect = __xfrm_garbage_collect;
2302 		xfrm_policy_afinfo[afinfo->family] = afinfo;
2303 	}
2304 	write_unlock_bh(&xfrm_policy_afinfo_lock);
2305 	return err;
2306 }
2307 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
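
/* Registration sketch (after the AF_INET code in xfrm4_policy.c): an
 * address family fills in an afinfo and registers it once at init
 * time, e.g.
 *
 *	static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
 *		.family		= AF_INET,
 *		.dst_ops	= &xfrm4_dst_ops,
 *		...
 *	};
 *	xfrm_policy_register_afinfo(&xfrm4_policy_afinfo);
 *
 * Any hooks left NULL are filled with the XFRM defaults above.
 */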
2308 
2309 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
2310 {
2311 	int err = 0;
2312 	if (unlikely(afinfo == NULL))
2313 		return -EINVAL;
2314 	if (unlikely(afinfo->family >= NPROTO))
2315 		return -EAFNOSUPPORT;
2316 	write_lock_bh(&xfrm_policy_afinfo_lock);
2317 	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
2318 		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
2319 			err = -EINVAL;
2320 		else {
2321 			struct dst_ops *dst_ops = afinfo->dst_ops;
2322 			xfrm_policy_afinfo[afinfo->family] = NULL;
2323 			dst_ops->kmem_cachep = NULL;
2324 			dst_ops->check = NULL;
2325 			dst_ops->negative_advice = NULL;
2326 			dst_ops->link_failure = NULL;
2327 			afinfo->garbage_collect = NULL;
2328 		}
2329 	}
2330 	write_unlock_bh(&xfrm_policy_afinfo_lock);
2331 	return err;
2332 }
2333 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
2334 
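/* Note the asymmetric locking: on success the afinfo read lock is
 * left held and must be released via xfrm_policy_put_afinfo(); on
 * failure it is dropped here, so the caller must not call put.
 */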
2335 static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
2336 {
2337 	struct xfrm_policy_afinfo *afinfo;
2338 	if (unlikely(family >= NPROTO))
2339 		return NULL;
2340 	read_lock(&xfrm_policy_afinfo_lock);
2341 	afinfo = xfrm_policy_afinfo[family];
2342 	if (unlikely(!afinfo))
2343 		read_unlock(&xfrm_policy_afinfo_lock);
2344 	return afinfo;
2345 }
2346 
2347 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
2348 {
2349 	read_unlock(&xfrm_policy_afinfo_lock);
2350 }
2351 
2352 static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
2353 {
2354 	struct net_device *dev = ptr;
2355 
2356 	switch (event) {
2357 	case NETDEV_DOWN:
2358 		xfrm_flush_bundles(dev_net(dev));
2359 	}
2360 	return NOTIFY_DONE;
2361 }
2362 
2363 static struct notifier_block xfrm_dev_notifier = {
2364 	.notifier_call	= xfrm_dev_event,
2365 };
2366 
2367 #ifdef CONFIG_XFRM_STATISTICS
2368 static int __net_init xfrm_statistics_init(struct net *net)
2369 {
2370 	int rv;
2371 
2372 	if (snmp_mib_init((void **)net->mib.xfrm_statistics,
2373 			  sizeof(struct linux_xfrm_mib)) < 0)
2374 		return -ENOMEM;
2375 	rv = xfrm_proc_init(net);
2376 	if (rv < 0)
2377 		snmp_mib_free((void **)net->mib.xfrm_statistics);
2378 	return rv;
2379 }
2380 
2381 static void xfrm_statistics_fini(struct net *net)
2382 {
2383 	xfrm_proc_fini(net);
2384 	snmp_mib_free((void **)net->mib.xfrm_statistics);
2385 }
2386 #else
2387 static int __net_init xfrm_statistics_init(struct net *net)
2388 {
2389 	return 0;
2390 }
2391 
2392 static void xfrm_statistics_fini(struct net *net)
2393 {
2394 }
2395 #endif
2396 
2397 static int __net_init xfrm_policy_init(struct net *net)
2398 {
2399 	unsigned int hmask, sz;
2400 	int dir;
2401 
2402 	if (net_eq(net, &init_net))
2403 		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
2404 					   sizeof(struct xfrm_dst),
2405 					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2406 					   NULL);
2407 
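	/* Start small: an 8-bucket table; the policy_hash_work
	 * scheduled below (xfrm_hash_resize) rehashes into larger
	 * tables as entries accumulate.
	 */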
2408 	hmask = 8 - 1;
2409 	sz = (hmask+1) * sizeof(struct hlist_head);
2410 
2411 	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
2412 	if (!net->xfrm.policy_byidx)
2413 		goto out_byidx;
2414 	net->xfrm.policy_idx_hmask = hmask;
2415 
2416 	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
2417 		struct xfrm_policy_hash *htab;
2418 
2419 		net->xfrm.policy_count[dir] = 0;
2420 		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
2421 
2422 		htab = &net->xfrm.policy_bydst[dir];
2423 		htab->table = xfrm_hash_alloc(sz);
2424 		if (!htab->table)
2425 			goto out_bydst;
2426 		htab->hmask = hmask;
2427 	}
2428 
2429 	INIT_LIST_HEAD(&net->xfrm.policy_all);
2430 	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
2431 	if (net_eq(net, &init_net))
2432 		register_netdevice_notifier(&xfrm_dev_notifier);
2433 	return 0;
2434 
2435 out_bydst:
2436 	for (dir--; dir >= 0; dir--) {
2437 		struct xfrm_policy_hash *htab;
2438 
2439 		htab = &net->xfrm.policy_bydst[dir];
2440 		xfrm_hash_free(htab->table, sz);
2441 	}
2442 	xfrm_hash_free(net->xfrm.policy_byidx, sz);
2443 out_byidx:
2444 	return -ENOMEM;
2445 }
2446 
2447 static void xfrm_policy_fini(struct net *net)
2448 {
2449 	struct xfrm_audit audit_info;
2450 	unsigned int sz;
2451 	int dir;
2452 
2453 	flush_work(&net->xfrm.policy_hash_work);
2454 #ifdef CONFIG_XFRM_SUB_POLICY
2455 	audit_info.loginuid = -1;
2456 	audit_info.sessionid = -1;
2457 	audit_info.secid = 0;
2458 	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info);
2459 #endif
2460 	audit_info.loginuid = -1;
2461 	audit_info.sessionid = -1;
2462 	audit_info.secid = 0;
2463 	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
2464 	flush_work(&xfrm_policy_gc_work);
2465 
2466 	WARN_ON(!list_empty(&net->xfrm.policy_all));
2467 
2468 	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
2469 		struct xfrm_policy_hash *htab;
2470 
2471 		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
2472 
2473 		htab = &net->xfrm.policy_bydst[dir];
2474 		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
2475 		WARN_ON(!hlist_empty(htab->table));
2476 		xfrm_hash_free(htab->table, sz);
2477 	}
2478 
2479 	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
2480 	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
2481 	xfrm_hash_free(net->xfrm.policy_byidx, sz);
2482 }
2483 
2484 static int __net_init xfrm_net_init(struct net *net)
2485 {
2486 	int rv;
2487 
2488 	rv = xfrm_statistics_init(net);
2489 	if (rv < 0)
2490 		goto out_statistics;
2491 	rv = xfrm_state_init(net);
2492 	if (rv < 0)
2493 		goto out_state;
2494 	rv = xfrm_policy_init(net);
2495 	if (rv < 0)
2496 		goto out_policy;
2497 	rv = xfrm_sysctl_init(net);
2498 	if (rv < 0)
2499 		goto out_sysctl;
2500 	return 0;
2501 
2502 out_sysctl:
2503 	xfrm_policy_fini(net);
2504 out_policy:
2505 	xfrm_state_fini(net);
2506 out_state:
2507 	xfrm_statistics_fini(net);
2508 out_statistics:
2509 	return rv;
2510 }
2511 
2512 static void __net_exit xfrm_net_exit(struct net *net)
2513 {
2514 	xfrm_sysctl_fini(net);
2515 	xfrm_policy_fini(net);
2516 	xfrm_state_fini(net);
2517 	xfrm_statistics_fini(net);
2518 }
2519 
2520 static struct pernet_operations __net_initdata xfrm_net_ops = {
2521 	.init = xfrm_net_init,
2522 	.exit = xfrm_net_exit,
2523 };
2524 
2525 void __init xfrm_init(void)
2526 {
2527 	register_pernet_subsys(&xfrm_net_ops);
2528 	xfrm_input_init();
2529 }
2530 
2531 #ifdef CONFIG_AUDITSYSCALL
2532 static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
2533 					 struct audit_buffer *audit_buf)
2534 {
2535 	struct xfrm_sec_ctx *ctx = xp->security;
2536 	struct xfrm_selector *sel = &xp->selector;
2537 
2538 	if (ctx)
2539 		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
2540 				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
2541 
2542 	switch (sel->family) {
2543 	case AF_INET:
2544 		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
2545 		if (sel->prefixlen_s != 32)
2546 			audit_log_format(audit_buf, " src_prefixlen=%d",
2547 					 sel->prefixlen_s);
2548 		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
2549 		if (sel->prefixlen_d != 32)
2550 			audit_log_format(audit_buf, " dst_prefixlen=%d",
2551 					 sel->prefixlen_d);
2552 		break;
2553 	case AF_INET6:
2554 		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
2555 		if (sel->prefixlen_s != 128)
2556 			audit_log_format(audit_buf, " src_prefixlen=%d",
2557 					 sel->prefixlen_s);
2558 		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
2559 		if (sel->prefixlen_d != 128)
2560 			audit_log_format(audit_buf, " dst_prefixlen=%d",
2561 					 sel->prefixlen_d);
2562 		break;
2563 	}
2564 }
2565 
2566 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
2567 			   uid_t auid, u32 sessionid, u32 secid)
2568 {
2569 	struct audit_buffer *audit_buf;
2570 
2571 	audit_buf = xfrm_audit_start("SPD-add");
2572 	if (audit_buf == NULL)
2573 		return;
2574 	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2575 	audit_log_format(audit_buf, " res=%u", result);
2576 	xfrm_audit_common_policyinfo(xp, audit_buf);
2577 	audit_log_end(audit_buf);
2578 }
2579 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
2580 
2581 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
2582 			      uid_t auid, u32 sessionid, u32 secid)
2583 {
2584 	struct audit_buffer *audit_buf;
2585 
2586 	audit_buf = xfrm_audit_start("SPD-delete");
2587 	if (audit_buf == NULL)
2588 		return;
2589 	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
2590 	audit_log_format(audit_buf, " res=%u", result);
2591 	xfrm_audit_common_policyinfo(xp, audit_buf);
2592 	audit_log_end(audit_buf);
2593 }
2594 EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
2595 #endif
2596 
2597 #ifdef CONFIG_XFRM_MIGRATE
2598 static int xfrm_migrate_selector_match(struct xfrm_selector *sel_cmp,
2599 				       struct xfrm_selector *sel_tgt)
2600 {
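	/* With IPSEC_ULPROTO_ANY the comparison is a wildcard match on
	 * family, addresses and prefix lengths only; otherwise the two
	 * selectors must be identical byte for byte.
	 */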
2601 	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
2602 		if (sel_tgt->family == sel_cmp->family &&
2603 		    xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
2604 				  sel_cmp->family) == 0 &&
2605 		    xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
2606 				  sel_cmp->family) == 0 &&
2607 		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
2608 		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
2609 			return 1;
2610 		}
2611 	} else {
2612 		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
2613 			return 1;
2614 		}
2615 	}
2616 	return 0;
2617 }
2618 
2619 static struct xfrm_policy *xfrm_migrate_policy_find(struct xfrm_selector *sel,
2620 						     u8 dir, u8 type)
2621 {
2622 	struct xfrm_policy *pol, *ret = NULL;
2623 	struct hlist_node *entry;
2624 	struct hlist_head *chain;
2625 	u32 priority = ~0U;
2626 
2627 	read_lock_bh(&xfrm_policy_lock);
2628 	chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir);
2629 	hlist_for_each_entry(pol, entry, chain, bydst) {
2630 		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
2631 		    pol->type == type) {
2632 			ret = pol;
2633 			priority = ret->priority;
2634 			break;
2635 		}
2636 	}
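	/* Also scan the inexact list; an entry there wins only if its
	 * priority is strictly better than the hashed match found
	 * above.
	 */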
2637 	chain = &init_net.xfrm.policy_inexact[dir];
2638 	hlist_for_each_entry(pol, entry, chain, bydst) {
2639 		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
2640 		    pol->type == type &&
2641 		    pol->priority < priority) {
2642 			ret = pol;
2643 			break;
2644 		}
2645 	}
2646 
2647 	if (ret)
2648 		xfrm_pol_hold(ret);
2649 
2650 	read_unlock_bh(&xfrm_policy_lock);
2651 
2652 	return ret;
2653 }
2654 
2655 static int migrate_tmpl_match(struct xfrm_migrate *m, struct xfrm_tmpl *t)
2656 {
2657 	int match = 0;
2658 
2659 	if (t->mode == m->mode && t->id.proto == m->proto &&
2660 	    (m->reqid == 0 || t->reqid == m->reqid)) {
2661 		switch (t->mode) {
2662 		case XFRM_MODE_TUNNEL:
2663 		case XFRM_MODE_BEET:
2664 			if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
2665 					  m->old_family) == 0 &&
2666 			    xfrm_addr_cmp(&t->saddr, &m->old_saddr,
2667 					  m->old_family) == 0) {
2668 				match = 1;
2669 			}
2670 			break;
2671 		case XFRM_MODE_TRANSPORT:
2672 			/* In transport mode the template does not store
2673 			   any IP addresses, so we only compare mode and
2674 			   protocol. */
2675 			match = 1;
2676 			break;
2677 		default:
2678 			break;
2679 		}
2680 	}
2681 	return match;
2682 }
2683 
2684 /* update endpoint address(es) of template(s) */
2685 static int xfrm_policy_migrate(struct xfrm_policy *pol,
2686 			       struct xfrm_migrate *m, int num_migrate)
2687 {
2688 	struct xfrm_migrate *mp;
2689 	struct dst_entry *dst;
2690 	int i, j, n = 0;
2691 
2692 	write_lock_bh(&pol->lock);
2693 	if (unlikely(pol->walk.dead)) {
2694 		/* target policy has been deleted */
2695 		write_unlock_bh(&pol->lock);
2696 		return -ENOENT;
2697 	}
2698 
2699 	for (i = 0; i < pol->xfrm_nr; i++) {
2700 		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
2701 			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
2702 				continue;
2703 			n++;
2704 			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
2705 			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
2706 				continue;
2707 			/* update endpoints */
2708 			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
2709 			       sizeof(pol->xfrm_vec[i].id.daddr));
2710 			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
2711 			       sizeof(pol->xfrm_vec[i].saddr));
2712 			pol->xfrm_vec[i].encap_family = mp->new_family;
2713 			/* flush bundles */
2714 			while ((dst = pol->bundles) != NULL) {
2715 				pol->bundles = dst->next;
2716 				dst_free(dst);
2717 			}
2718 		}
2719 	}
2720 
2721 	write_unlock_bh(&pol->lock);
2722 
2723 	if (!n)
2724 		return -ENODATA;
2725 
2726 	return 0;
2727 }
2728 
2729 static int xfrm_migrate_check(struct xfrm_migrate *m, int num_migrate)
2730 {
2731 	int i, j;
2732 
2733 	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
2734 		return -EINVAL;
2735 
2736 	for (i = 0; i < num_migrate; i++) {
2737 		if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr,
2738 				   m[i].old_family) == 0) &&
2739 		    (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr,
2740 				   m[i].old_family) == 0))
2741 			return -EINVAL;
2742 		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
2743 		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
2744 			return -EINVAL;
2745 
2746 		/* check if there is any duplicated entry */
2747 		for (j = i + 1; j < num_migrate; j++) {
2748 			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
2749 				    sizeof(m[i].old_daddr)) &&
2750 			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
2751 				    sizeof(m[i].old_saddr)) &&
2752 			    m[i].proto == m[j].proto &&
2753 			    m[i].mode == m[j].mode &&
2754 			    m[i].reqid == m[j].reqid &&
2755 			    m[i].old_family == m[j].old_family)
2756 				return -EINVAL;
2757 		}
2758 	}
2759 
2760 	return 0;
2761 }
2762 
2763 int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
2764 		 struct xfrm_migrate *m, int num_migrate,
2765 		 struct xfrm_kmaddress *k)
2766 {
2767 	int i, err, nx_cur = 0, nx_new = 0;
2768 	struct xfrm_policy *pol = NULL;
2769 	struct xfrm_state *x, *xc;
2770 	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
2771 	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
2772 	struct xfrm_migrate *mp;
2773 
2774 	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
2775 		goto out;
2776 
2777 	/* Stage 1 - find policy */
2778 	if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
2779 		err = -ENOENT;
2780 		goto out;
2781 	}
2782 
2783 	/* Stage 2 - find and update state(s) */
2784 	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
2785 		if ((x = xfrm_migrate_state_find(mp))) {
2786 			x_cur[nx_cur] = x;
2787 			nx_cur++;
2788 			if ((xc = xfrm_state_migrate(x, mp))) {
2789 				x_new[nx_new] = xc;
2790 				nx_new++;
2791 			} else {
2792 				err = -ENODATA;
2793 				goto restore_state;
2794 			}
2795 		}
2796 	}
2797 
2798 	/* Stage 3 - update policy */
2799 	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
2800 		goto restore_state;
2801 
2802 	/* Stage 4 - delete old state(s) */
2803 	if (nx_cur) {
2804 		xfrm_states_put(x_cur, nx_cur);
2805 		xfrm_states_delete(x_cur, nx_cur);
2806 	}
2807 
2808 	/* Stage 5 - announce */
2809 	km_migrate(sel, dir, type, m, num_migrate, k);
2810 
2811 	xfrm_pol_put(pol);
2812 
2813 	return 0;
2814 out:
2815 	return err;
2816 
2817 restore_state:
2818 	if (pol)
2819 		xfrm_pol_put(pol);
2820 	if (nx_cur)
2821 		xfrm_states_put(x_cur, nx_cur);
2822 	if (nx_new)
2823 		xfrm_states_delete(x_new, nx_new);
2824 
2825 	return err;
2826 }
2827 EXPORT_SYMBOL(xfrm_migrate);
2828 #endif
2829