1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * xfrm_policy.c
4 *
5 * Changes:
6 * Mitsuru KANDA @USAGI
7 * Kazunori MIYAZAWA @USAGI
8 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * IPv6 support
10 * Kazunori MIYAZAWA @USAGI
11 * YOSHIFUJI Hideaki
12 * Split up af-specific portion
13 * Derek Atkins <derek@ihtfp.com> Add the post_input processor
14 *
15 */
16
17 #include <linux/err.h>
18 #include <linux/slab.h>
19 #include <linux/kmod.h>
20 #include <linux/list.h>
21 #include <linux/spinlock.h>
22 #include <linux/workqueue.h>
23 #include <linux/notifier.h>
24 #include <linux/netdevice.h>
25 #include <linux/netfilter.h>
26 #include <linux/module.h>
27 #include <linux/cache.h>
28 #include <linux/cpu.h>
29 #include <linux/audit.h>
30 #include <linux/rhashtable.h>
31 #include <linux/if_tunnel.h>
32 #include <linux/icmp.h>
33 #include <net/dst.h>
34 #include <net/flow.h>
35 #include <net/inet_ecn.h>
36 #include <net/xfrm.h>
37 #include <net/ip.h>
38 #include <net/gre.h>
39 #if IS_ENABLED(CONFIG_IPV6_MIP6)
40 #include <net/mip6.h>
41 #endif
42 #ifdef CONFIG_XFRM_STATISTICS
43 #include <net/snmp.h>
44 #endif
45 #ifdef CONFIG_XFRM_ESPINTCP
46 #include <net/espintcp.h>
47 #endif
48 #include <net/inet_dscp.h>
49
50 #include "xfrm_hash.h"
51
52 #define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
53 #define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
54 #define XFRM_MAX_QUEUE_LEN 100
55
56 struct xfrm_flo {
57 struct dst_entry *dst_orig;
58 u8 flags;
59 };
60
61 /* prefixes smaller than this are stored in lists, not trees. */
62 #define INEXACT_PREFIXLEN_IPV4 16
63 #define INEXACT_PREFIXLEN_IPV6 48
64
65 struct xfrm_pol_inexact_node {
66 struct rb_node node;
67 union {
68 xfrm_address_t addr;
69 struct rcu_head rcu;
70 };
71 u8 prefixlen;
72
73 struct rb_root root;
74
75 /* the policies matching this node, can be empty list */
76 struct hlist_head hhead;
77 };
78
79 /* xfrm inexact policy search tree:
80 * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
81 * |
82 * +---- root_d: sorted by daddr:prefix
83 * | |
84 * | xfrm_pol_inexact_node
85 * | |
86 * | +- root: sorted by saddr/prefix
87 * | | |
88 * | | xfrm_pol_inexact_node
89 * | | |
90 * | | + root: unused
91 * | | |
92 * | | + hhead: saddr:daddr policies
93 * | |
94 * | +- coarse policies and all any:daddr policies
95 * |
96 * +---- root_s: sorted by saddr:prefix
97 * | |
98 * | xfrm_pol_inexact_node
99 * | |
100 * | + root: unused
101 * | |
102 * | + hhead: saddr:any policies
103 * |
104 * +---- coarse policies and all any:any policies
105 *
106 * Lookups return four candidate lists:
107 * 1. any:any list from top-level xfrm_pol_inexact_bin
108 * 2. any:daddr list from daddr tree
109 * 3. saddr:daddr list from 2nd level daddr tree
110 * 4. saddr:any list from saddr tree
111 *
112 * This result set then needs to be searched for the policy with
113 * the lowest priority. If two candidates have the same priority, the
114 * struct xfrm_policy pos member with the lower number is used.
115 *
116 * This replicates previous single-list-search algorithm which would
117 * return first matching policy in the (ordered-by-priority) list.
118 */
119
120 struct xfrm_pol_inexact_key {
121 possible_net_t net;
122 u32 if_id;
123 u16 family;
124 u8 dir, type;
125 };
126
127 struct xfrm_pol_inexact_bin {
128 struct xfrm_pol_inexact_key k;
129 struct rhash_head head;
130 /* list containing '*:*' policies */
131 struct hlist_head hhead;
132
133 seqcount_spinlock_t count;
134 /* tree sorted by daddr/prefix */
135 struct rb_root root_d;
136
137 /* tree sorted by saddr/prefix */
138 struct rb_root root_s;
139
140 /* slow path below */
141 struct list_head inexact_bins;
142 struct rcu_head rcu;
143 };
144
145 enum xfrm_pol_inexact_candidate_type {
146 XFRM_POL_CAND_BOTH,
147 XFRM_POL_CAND_SADDR,
148 XFRM_POL_CAND_DADDR,
149 XFRM_POL_CAND_ANY,
150
151 XFRM_POL_CAND_MAX,
152 };
153
154 struct xfrm_pol_inexact_candidates {
155 struct hlist_head *res[XFRM_POL_CAND_MAX];
156 };
157
158 struct xfrm_flow_keys {
159 struct flow_dissector_key_basic basic;
160 struct flow_dissector_key_control control;
161 union {
162 struct flow_dissector_key_ipv4_addrs ipv4;
163 struct flow_dissector_key_ipv6_addrs ipv6;
164 } addrs;
165 struct flow_dissector_key_ip ip;
166 struct flow_dissector_key_icmp icmp;
167 struct flow_dissector_key_ports ports;
168 struct flow_dissector_key_keyid gre;
169 };
170
171 static struct flow_dissector xfrm_session_dissector __ro_after_init;
172
173 static DEFINE_SPINLOCK(xfrm_if_cb_lock);
174 static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;
175
176 static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
177 static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
178 __read_mostly;
179
180 static struct kmem_cache *xfrm_dst_cache __ro_after_init;
181
182 static struct rhashtable xfrm_policy_inexact_table;
183 static const struct rhashtable_params xfrm_pol_inexact_params;
184
185 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
186 static int stale_bundle(struct dst_entry *dst);
187 static int xfrm_bundle_ok(struct xfrm_dst *xdst);
188 static void xfrm_policy_queue_process(struct timer_list *t);
189
190 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
191 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
192 int dir);
193
194 static struct xfrm_pol_inexact_bin *
195 xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
196 u32 if_id);
197
198 static struct xfrm_pol_inexact_bin *
199 xfrm_policy_inexact_lookup_rcu(struct net *net,
200 u8 type, u16 family, u8 dir, u32 if_id);
201 static struct xfrm_policy *
202 xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
203 bool excl);
204
205 static bool
206 xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
207 struct xfrm_pol_inexact_bin *b,
208 const xfrm_address_t *saddr,
209 const xfrm_address_t *daddr);
210
xfrm_pol_hold_rcu(struct xfrm_policy * policy)211 static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
212 {
213 return refcount_inc_not_zero(&policy->refcnt);
214 }
215
216 static inline bool
__xfrm4_selector_match(const struct xfrm_selector * sel,const struct flowi * fl)217 __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
218 {
219 const struct flowi4 *fl4 = &fl->u.ip4;
220
221 return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
222 addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
223 !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
224 !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
225 (fl4->flowi4_proto == sel->proto || !sel->proto) &&
226 (fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
227 }
228
229 static inline bool
__xfrm6_selector_match(const struct xfrm_selector * sel,const struct flowi * fl)230 __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
231 {
232 const struct flowi6 *fl6 = &fl->u.ip6;
233
234 return addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
235 addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
236 !((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
237 !((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
238 (fl6->flowi6_proto == sel->proto || !sel->proto) &&
239 (fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
240 }
241
xfrm_selector_match(const struct xfrm_selector * sel,const struct flowi * fl,unsigned short family)242 bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
243 unsigned short family)
244 {
245 switch (family) {
246 case AF_INET:
247 return __xfrm4_selector_match(sel, fl);
248 case AF_INET6:
249 return __xfrm6_selector_match(sel, fl);
250 }
251 return false;
252 }
253
xfrm_policy_get_afinfo(unsigned short family)254 static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
255 {
256 const struct xfrm_policy_afinfo *afinfo;
257
258 if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
259 return NULL;
260 rcu_read_lock();
261 afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
262 if (unlikely(!afinfo))
263 rcu_read_unlock();
264 return afinfo;
265 }
266
267 /* Called with rcu_read_lock(). */
xfrm_if_get_cb(void)268 static const struct xfrm_if_cb *xfrm_if_get_cb(void)
269 {
270 return rcu_dereference(xfrm_if_cb);
271 }
272
__xfrm_dst_lookup(struct net * net,int tos,int oif,const xfrm_address_t * saddr,const xfrm_address_t * daddr,int family,u32 mark)273 struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
274 const xfrm_address_t *saddr,
275 const xfrm_address_t *daddr,
276 int family, u32 mark)
277 {
278 const struct xfrm_policy_afinfo *afinfo;
279 struct dst_entry *dst;
280
281 afinfo = xfrm_policy_get_afinfo(family);
282 if (unlikely(afinfo == NULL))
283 return ERR_PTR(-EAFNOSUPPORT);
284
285 dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);
286
287 rcu_read_unlock();
288
289 return dst;
290 }
291 EXPORT_SYMBOL(__xfrm_dst_lookup);
292
xfrm_dst_lookup(struct xfrm_state * x,int tos,int oif,xfrm_address_t * prev_saddr,xfrm_address_t * prev_daddr,int family,u32 mark)293 static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
294 int tos, int oif,
295 xfrm_address_t *prev_saddr,
296 xfrm_address_t *prev_daddr,
297 int family, u32 mark)
298 {
299 struct net *net = xs_net(x);
300 xfrm_address_t *saddr = &x->props.saddr;
301 xfrm_address_t *daddr = &x->id.daddr;
302 struct dst_entry *dst;
303
304 if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
305 saddr = x->coaddr;
306 daddr = prev_daddr;
307 }
308 if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
309 saddr = prev_saddr;
310 daddr = x->coaddr;
311 }
312
313 dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);
314
315 if (!IS_ERR(dst)) {
316 if (prev_saddr != saddr)
317 memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
318 if (prev_daddr != daddr)
319 memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
320 }
321
322 return dst;
323 }
324
make_jiffies(long secs)325 static inline unsigned long make_jiffies(long secs)
326 {
327 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
328 return MAX_SCHEDULE_TIMEOUT-1;
329 else
330 return secs*HZ;
331 }
332
xfrm_policy_timer(struct timer_list * t)333 static void xfrm_policy_timer(struct timer_list *t)
334 {
335 struct xfrm_policy *xp = from_timer(xp, t, timer);
336 time64_t now = ktime_get_real_seconds();
337 time64_t next = TIME64_MAX;
338 int warn = 0;
339 int dir;
340
341 read_lock(&xp->lock);
342
343 if (unlikely(xp->walk.dead))
344 goto out;
345
346 dir = xfrm_policy_id2dir(xp->index);
347
348 if (xp->lft.hard_add_expires_seconds) {
349 time64_t tmo = xp->lft.hard_add_expires_seconds +
350 xp->curlft.add_time - now;
351 if (tmo <= 0)
352 goto expired;
353 if (tmo < next)
354 next = tmo;
355 }
356 if (xp->lft.hard_use_expires_seconds) {
357 time64_t tmo = xp->lft.hard_use_expires_seconds +
358 (READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
359 if (tmo <= 0)
360 goto expired;
361 if (tmo < next)
362 next = tmo;
363 }
364 if (xp->lft.soft_add_expires_seconds) {
365 time64_t tmo = xp->lft.soft_add_expires_seconds +
366 xp->curlft.add_time - now;
367 if (tmo <= 0) {
368 warn = 1;
369 tmo = XFRM_KM_TIMEOUT;
370 }
371 if (tmo < next)
372 next = tmo;
373 }
374 if (xp->lft.soft_use_expires_seconds) {
375 time64_t tmo = xp->lft.soft_use_expires_seconds +
376 (READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
377 if (tmo <= 0) {
378 warn = 1;
379 tmo = XFRM_KM_TIMEOUT;
380 }
381 if (tmo < next)
382 next = tmo;
383 }
384
385 if (warn)
386 km_policy_expired(xp, dir, 0, 0);
387 if (next != TIME64_MAX &&
388 !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
389 xfrm_pol_hold(xp);
390
391 out:
392 read_unlock(&xp->lock);
393 xfrm_pol_put(xp);
394 return;
395
396 expired:
397 read_unlock(&xp->lock);
398 if (!xfrm_policy_delete(xp, dir))
399 km_policy_expired(xp, dir, 1, 0);
400 xfrm_pol_put(xp);
401 }
402
403 /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
404 * SPD calls.
405 */
406
xfrm_policy_alloc(struct net * net,gfp_t gfp)407 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
408 {
409 struct xfrm_policy *policy;
410
411 policy = kzalloc(sizeof(struct xfrm_policy), gfp);
412
413 if (policy) {
414 write_pnet(&policy->xp_net, net);
415 INIT_LIST_HEAD(&policy->walk.all);
416 INIT_HLIST_NODE(&policy->bydst);
417 INIT_HLIST_NODE(&policy->byidx);
418 rwlock_init(&policy->lock);
419 refcount_set(&policy->refcnt, 1);
420 skb_queue_head_init(&policy->polq.hold_queue);
421 timer_setup(&policy->timer, xfrm_policy_timer, 0);
422 timer_setup(&policy->polq.hold_timer,
423 xfrm_policy_queue_process, 0);
424 }
425 return policy;
426 }
427 EXPORT_SYMBOL(xfrm_policy_alloc);
428
xfrm_policy_destroy_rcu(struct rcu_head * head)429 static void xfrm_policy_destroy_rcu(struct rcu_head *head)
430 {
431 struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);
432
433 security_xfrm_policy_free(policy->security);
434 kfree(policy);
435 }
436
437 /* Destroy xfrm_policy: descendant resources must be released to this moment. */
438
xfrm_policy_destroy(struct xfrm_policy * policy)439 void xfrm_policy_destroy(struct xfrm_policy *policy)
440 {
441 BUG_ON(!policy->walk.dead);
442
443 if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
444 BUG();
445
446 xfrm_dev_policy_free(policy);
447 call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
448 }
449 EXPORT_SYMBOL(xfrm_policy_destroy);
450
451 /* Rule must be locked. Release descendant resources, announce
452 * entry dead. The rule must be unlinked from lists to the moment.
453 */
454
xfrm_policy_kill(struct xfrm_policy * policy)455 static void xfrm_policy_kill(struct xfrm_policy *policy)
456 {
457 xfrm_dev_policy_delete(policy);
458
459 write_lock_bh(&policy->lock);
460 policy->walk.dead = 1;
461 write_unlock_bh(&policy->lock);
462
463 atomic_inc(&policy->genid);
464
465 if (del_timer(&policy->polq.hold_timer))
466 xfrm_pol_put(policy);
467 skb_queue_purge(&policy->polq.hold_queue);
468
469 if (del_timer(&policy->timer))
470 xfrm_pol_put(policy);
471
472 xfrm_pol_put(policy);
473 }
474
475 static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
476
idx_hash(struct net * net,u32 index)477 static inline unsigned int idx_hash(struct net *net, u32 index)
478 {
479 return __idx_hash(index, net->xfrm.policy_idx_hmask);
480 }
481
482 /* calculate policy hash thresholds */
__get_hash_thresh(struct net * net,unsigned short family,int dir,u8 * dbits,u8 * sbits)483 static void __get_hash_thresh(struct net *net,
484 unsigned short family, int dir,
485 u8 *dbits, u8 *sbits)
486 {
487 switch (family) {
488 case AF_INET:
489 *dbits = net->xfrm.policy_bydst[dir].dbits4;
490 *sbits = net->xfrm.policy_bydst[dir].sbits4;
491 break;
492
493 case AF_INET6:
494 *dbits = net->xfrm.policy_bydst[dir].dbits6;
495 *sbits = net->xfrm.policy_bydst[dir].sbits6;
496 break;
497
498 default:
499 *dbits = 0;
500 *sbits = 0;
501 }
502 }
503
policy_hash_bysel(struct net * net,const struct xfrm_selector * sel,unsigned short family,int dir)504 static struct hlist_head *policy_hash_bysel(struct net *net,
505 const struct xfrm_selector *sel,
506 unsigned short family, int dir)
507 {
508 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
509 unsigned int hash;
510 u8 dbits;
511 u8 sbits;
512
513 __get_hash_thresh(net, family, dir, &dbits, &sbits);
514 hash = __sel_hash(sel, family, hmask, dbits, sbits);
515
516 if (hash == hmask + 1)
517 return NULL;
518
519 return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
520 lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
521 }
522
policy_hash_direct(struct net * net,const xfrm_address_t * daddr,const xfrm_address_t * saddr,unsigned short family,int dir)523 static struct hlist_head *policy_hash_direct(struct net *net,
524 const xfrm_address_t *daddr,
525 const xfrm_address_t *saddr,
526 unsigned short family, int dir)
527 {
528 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
529 unsigned int hash;
530 u8 dbits;
531 u8 sbits;
532
533 __get_hash_thresh(net, family, dir, &dbits, &sbits);
534 hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);
535
536 return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
537 lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
538 }
539
xfrm_dst_hash_transfer(struct net * net,struct hlist_head * list,struct hlist_head * ndsttable,unsigned int nhashmask,int dir)540 static void xfrm_dst_hash_transfer(struct net *net,
541 struct hlist_head *list,
542 struct hlist_head *ndsttable,
543 unsigned int nhashmask,
544 int dir)
545 {
546 struct hlist_node *tmp, *entry0 = NULL;
547 struct xfrm_policy *pol;
548 unsigned int h0 = 0;
549 u8 dbits;
550 u8 sbits;
551
552 redo:
553 hlist_for_each_entry_safe(pol, tmp, list, bydst) {
554 unsigned int h;
555
556 __get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
557 h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
558 pol->family, nhashmask, dbits, sbits);
559 if (!entry0 || pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
560 hlist_del_rcu(&pol->bydst);
561 hlist_add_head_rcu(&pol->bydst, ndsttable + h);
562 h0 = h;
563 } else {
564 if (h != h0)
565 continue;
566 hlist_del_rcu(&pol->bydst);
567 hlist_add_behind_rcu(&pol->bydst, entry0);
568 }
569 entry0 = &pol->bydst;
570 }
571 if (!hlist_empty(list)) {
572 entry0 = NULL;
573 goto redo;
574 }
575 }
576
xfrm_idx_hash_transfer(struct hlist_head * list,struct hlist_head * nidxtable,unsigned int nhashmask)577 static void xfrm_idx_hash_transfer(struct hlist_head *list,
578 struct hlist_head *nidxtable,
579 unsigned int nhashmask)
580 {
581 struct hlist_node *tmp;
582 struct xfrm_policy *pol;
583
584 hlist_for_each_entry_safe(pol, tmp, list, byidx) {
585 unsigned int h;
586
587 h = __idx_hash(pol->index, nhashmask);
588 hlist_add_head(&pol->byidx, nidxtable+h);
589 }
590 }
591
xfrm_new_hash_mask(unsigned int old_hmask)592 static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
593 {
594 return ((old_hmask + 1) << 1) - 1;
595 }
596
xfrm_bydst_resize(struct net * net,int dir)597 static void xfrm_bydst_resize(struct net *net, int dir)
598 {
599 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
600 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
601 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
602 struct hlist_head *ndst = xfrm_hash_alloc(nsize);
603 struct hlist_head *odst;
604 int i;
605
606 if (!ndst)
607 return;
608
609 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
610 write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
611
612 odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
613 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
614
615 for (i = hmask; i >= 0; i--)
616 xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);
617
618 rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
619 net->xfrm.policy_bydst[dir].hmask = nhashmask;
620
621 write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
622 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
623
624 synchronize_rcu();
625
626 xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
627 }
628
xfrm_byidx_resize(struct net * net)629 static void xfrm_byidx_resize(struct net *net)
630 {
631 unsigned int hmask = net->xfrm.policy_idx_hmask;
632 unsigned int nhashmask = xfrm_new_hash_mask(hmask);
633 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
634 struct hlist_head *oidx = net->xfrm.policy_byidx;
635 struct hlist_head *nidx = xfrm_hash_alloc(nsize);
636 int i;
637
638 if (!nidx)
639 return;
640
641 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
642
643 for (i = hmask; i >= 0; i--)
644 xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);
645
646 net->xfrm.policy_byidx = nidx;
647 net->xfrm.policy_idx_hmask = nhashmask;
648
649 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
650
651 xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
652 }
653
xfrm_bydst_should_resize(struct net * net,int dir,int * total)654 static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
655 {
656 unsigned int cnt = net->xfrm.policy_count[dir];
657 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
658
659 if (total)
660 *total += cnt;
661
662 if ((hmask + 1) < xfrm_policy_hashmax &&
663 cnt > hmask)
664 return 1;
665
666 return 0;
667 }
668
xfrm_byidx_should_resize(struct net * net,int total)669 static inline int xfrm_byidx_should_resize(struct net *net, int total)
670 {
671 unsigned int hmask = net->xfrm.policy_idx_hmask;
672
673 if ((hmask + 1) < xfrm_policy_hashmax &&
674 total > hmask)
675 return 1;
676
677 return 0;
678 }
679
xfrm_spd_getinfo(struct net * net,struct xfrmk_spdinfo * si)680 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
681 {
682 si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
683 si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
684 si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
685 si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
686 si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
687 si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
688 si->spdhcnt = net->xfrm.policy_idx_hmask;
689 si->spdhmcnt = xfrm_policy_hashmax;
690 }
691 EXPORT_SYMBOL(xfrm_spd_getinfo);
692
693 static DEFINE_MUTEX(hash_resize_mutex);
xfrm_hash_resize(struct work_struct * work)694 static void xfrm_hash_resize(struct work_struct *work)
695 {
696 struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
697 int dir, total;
698
699 mutex_lock(&hash_resize_mutex);
700
701 total = 0;
702 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
703 if (xfrm_bydst_should_resize(net, dir, &total))
704 xfrm_bydst_resize(net, dir);
705 }
706 if (xfrm_byidx_should_resize(net, total))
707 xfrm_byidx_resize(net);
708
709 mutex_unlock(&hash_resize_mutex);
710 }
711
712 /* Make sure *pol can be inserted into fastbin.
713 * Useful to check that later insert requests will be successful
714 * (provided xfrm_policy_lock is held throughout).
715 */
716 static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_alloc_bin(const struct xfrm_policy * pol,u8 dir)717 xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
718 {
719 struct xfrm_pol_inexact_bin *bin, *prev;
720 struct xfrm_pol_inexact_key k = {
721 .family = pol->family,
722 .type = pol->type,
723 .dir = dir,
724 .if_id = pol->if_id,
725 };
726 struct net *net = xp_net(pol);
727
728 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
729
730 write_pnet(&k.net, net);
731 bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
732 xfrm_pol_inexact_params);
733 if (bin)
734 return bin;
735
736 bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
737 if (!bin)
738 return NULL;
739
740 bin->k = k;
741 INIT_HLIST_HEAD(&bin->hhead);
742 bin->root_d = RB_ROOT;
743 bin->root_s = RB_ROOT;
744 seqcount_spinlock_init(&bin->count, &net->xfrm.xfrm_policy_lock);
745
746 prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
747 &bin->k, &bin->head,
748 xfrm_pol_inexact_params);
749 if (!prev) {
750 list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
751 return bin;
752 }
753
754 kfree(bin);
755
756 return IS_ERR(prev) ? NULL : prev;
757 }
758
xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t * addr,int family,u8 prefixlen)759 static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
760 int family, u8 prefixlen)
761 {
762 if (xfrm_addr_any(addr, family))
763 return true;
764
765 if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
766 return true;
767
768 if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
769 return true;
770
771 return false;
772 }
773
774 static bool
xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy * policy)775 xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
776 {
777 const xfrm_address_t *addr;
778 bool saddr_any, daddr_any;
779 u8 prefixlen;
780
781 addr = &policy->selector.saddr;
782 prefixlen = policy->selector.prefixlen_s;
783
784 saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
785 policy->family,
786 prefixlen);
787 addr = &policy->selector.daddr;
788 prefixlen = policy->selector.prefixlen_d;
789 daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
790 policy->family,
791 prefixlen);
792 return saddr_any && daddr_any;
793 }
794
xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node * node,const xfrm_address_t * addr,u8 prefixlen)795 static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
796 const xfrm_address_t *addr, u8 prefixlen)
797 {
798 node->addr = *addr;
799 node->prefixlen = prefixlen;
800 }
801
802 static struct xfrm_pol_inexact_node *
xfrm_pol_inexact_node_alloc(const xfrm_address_t * addr,u8 prefixlen)803 xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
804 {
805 struct xfrm_pol_inexact_node *node;
806
807 node = kzalloc(sizeof(*node), GFP_ATOMIC);
808 if (node)
809 xfrm_pol_inexact_node_init(node, addr, prefixlen);
810
811 return node;
812 }
813
xfrm_policy_addr_delta(const xfrm_address_t * a,const xfrm_address_t * b,u8 prefixlen,u16 family)814 static int xfrm_policy_addr_delta(const xfrm_address_t *a,
815 const xfrm_address_t *b,
816 u8 prefixlen, u16 family)
817 {
818 u32 ma, mb, mask;
819 unsigned int pdw, pbi;
820 int delta = 0;
821
822 switch (family) {
823 case AF_INET:
824 if (prefixlen == 0)
825 return 0;
826 mask = ~0U << (32 - prefixlen);
827 ma = ntohl(a->a4) & mask;
828 mb = ntohl(b->a4) & mask;
829 if (ma < mb)
830 delta = -1;
831 else if (ma > mb)
832 delta = 1;
833 break;
834 case AF_INET6:
835 pdw = prefixlen >> 5;
836 pbi = prefixlen & 0x1f;
837
838 if (pdw) {
839 delta = memcmp(a->a6, b->a6, pdw << 2);
840 if (delta)
841 return delta;
842 }
843 if (pbi) {
844 mask = ~0U << (32 - pbi);
845 ma = ntohl(a->a6[pdw]) & mask;
846 mb = ntohl(b->a6[pdw]) & mask;
847 if (ma < mb)
848 delta = -1;
849 else if (ma > mb)
850 delta = 1;
851 }
852 break;
853 default:
854 break;
855 }
856
857 return delta;
858 }
859
xfrm_policy_inexact_list_reinsert(struct net * net,struct xfrm_pol_inexact_node * n,u16 family)860 static void xfrm_policy_inexact_list_reinsert(struct net *net,
861 struct xfrm_pol_inexact_node *n,
862 u16 family)
863 {
864 unsigned int matched_s, matched_d;
865 struct xfrm_policy *policy, *p;
866
867 matched_s = 0;
868 matched_d = 0;
869
870 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
871 struct hlist_node *newpos = NULL;
872 bool matches_s, matches_d;
873
874 if (policy->walk.dead || !policy->bydst_reinsert)
875 continue;
876
877 WARN_ON_ONCE(policy->family != family);
878
879 policy->bydst_reinsert = false;
880 hlist_for_each_entry(p, &n->hhead, bydst) {
881 if (policy->priority > p->priority)
882 newpos = &p->bydst;
883 else if (policy->priority == p->priority &&
884 policy->pos > p->pos)
885 newpos = &p->bydst;
886 else
887 break;
888 }
889
890 if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
891 hlist_add_behind_rcu(&policy->bydst, newpos);
892 else
893 hlist_add_head_rcu(&policy->bydst, &n->hhead);
894
895 /* paranoia checks follow.
896 * Check that the reinserted policy matches at least
897 * saddr or daddr for current node prefix.
898 *
899 * Matching both is fine, matching saddr in one policy
900 * (but not daddr) and then matching only daddr in another
901 * is a bug.
902 */
903 matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
904 &n->addr,
905 n->prefixlen,
906 family) == 0;
907 matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
908 &n->addr,
909 n->prefixlen,
910 family) == 0;
911 if (matches_s && matches_d)
912 continue;
913
914 WARN_ON_ONCE(!matches_s && !matches_d);
915 if (matches_s)
916 matched_s++;
917 if (matches_d)
918 matched_d++;
919 WARN_ON_ONCE(matched_s && matched_d);
920 }
921 }
922
xfrm_policy_inexact_node_reinsert(struct net * net,struct xfrm_pol_inexact_node * n,struct rb_root * new,u16 family)923 static void xfrm_policy_inexact_node_reinsert(struct net *net,
924 struct xfrm_pol_inexact_node *n,
925 struct rb_root *new,
926 u16 family)
927 {
928 struct xfrm_pol_inexact_node *node;
929 struct rb_node **p, *parent;
930
931 /* we should not have another subtree here */
932 WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
933 restart:
934 parent = NULL;
935 p = &new->rb_node;
936 while (*p) {
937 u8 prefixlen;
938 int delta;
939
940 parent = *p;
941 node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
942
943 prefixlen = min(node->prefixlen, n->prefixlen);
944
945 delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
946 prefixlen, family);
947 if (delta < 0) {
948 p = &parent->rb_left;
949 } else if (delta > 0) {
950 p = &parent->rb_right;
951 } else {
952 bool same_prefixlen = node->prefixlen == n->prefixlen;
953 struct xfrm_policy *tmp;
954
955 hlist_for_each_entry(tmp, &n->hhead, bydst) {
956 tmp->bydst_reinsert = true;
957 hlist_del_rcu(&tmp->bydst);
958 }
959
960 node->prefixlen = prefixlen;
961
962 xfrm_policy_inexact_list_reinsert(net, node, family);
963
964 if (same_prefixlen) {
965 kfree_rcu(n, rcu);
966 return;
967 }
968
969 rb_erase(*p, new);
970 kfree_rcu(n, rcu);
971 n = node;
972 goto restart;
973 }
974 }
975
976 rb_link_node_rcu(&n->node, parent, p);
977 rb_insert_color(&n->node, new);
978 }
979
980 /* merge nodes v and n */
xfrm_policy_inexact_node_merge(struct net * net,struct xfrm_pol_inexact_node * v,struct xfrm_pol_inexact_node * n,u16 family)981 static void xfrm_policy_inexact_node_merge(struct net *net,
982 struct xfrm_pol_inexact_node *v,
983 struct xfrm_pol_inexact_node *n,
984 u16 family)
985 {
986 struct xfrm_pol_inexact_node *node;
987 struct xfrm_policy *tmp;
988 struct rb_node *rnode;
989
990 /* To-be-merged node v has a subtree.
991 *
992 * Dismantle it and insert its nodes to n->root.
993 */
994 while ((rnode = rb_first(&v->root)) != NULL) {
995 node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
996 rb_erase(&node->node, &v->root);
997 xfrm_policy_inexact_node_reinsert(net, node, &n->root,
998 family);
999 }
1000
1001 hlist_for_each_entry(tmp, &v->hhead, bydst) {
1002 tmp->bydst_reinsert = true;
1003 hlist_del_rcu(&tmp->bydst);
1004 }
1005
1006 xfrm_policy_inexact_list_reinsert(net, n, family);
1007 }
1008
1009 static struct xfrm_pol_inexact_node *
xfrm_policy_inexact_insert_node(struct net * net,struct rb_root * root,xfrm_address_t * addr,u16 family,u8 prefixlen,u8 dir)1010 xfrm_policy_inexact_insert_node(struct net *net,
1011 struct rb_root *root,
1012 xfrm_address_t *addr,
1013 u16 family, u8 prefixlen, u8 dir)
1014 {
1015 struct xfrm_pol_inexact_node *cached = NULL;
1016 struct rb_node **p, *parent = NULL;
1017 struct xfrm_pol_inexact_node *node;
1018
1019 p = &root->rb_node;
1020 while (*p) {
1021 int delta;
1022
1023 parent = *p;
1024 node = rb_entry(*p, struct xfrm_pol_inexact_node, node);
1025
1026 delta = xfrm_policy_addr_delta(addr, &node->addr,
1027 node->prefixlen,
1028 family);
1029 if (delta == 0 && prefixlen >= node->prefixlen) {
1030 WARN_ON_ONCE(cached); /* ipsec policies got lost */
1031 return node;
1032 }
1033
1034 if (delta < 0)
1035 p = &parent->rb_left;
1036 else
1037 p = &parent->rb_right;
1038
1039 if (prefixlen < node->prefixlen) {
1040 delta = xfrm_policy_addr_delta(addr, &node->addr,
1041 prefixlen,
1042 family);
1043 if (delta)
1044 continue;
1045
1046 /* This node is a subnet of the new prefix. It needs
1047 * to be removed and re-inserted with the smaller
1048 * prefix and all nodes that are now also covered
1049 * by the reduced prefixlen.
1050 */
1051 rb_erase(&node->node, root);
1052
1053 if (!cached) {
1054 xfrm_pol_inexact_node_init(node, addr,
1055 prefixlen);
1056 cached = node;
1057 } else {
1058 /* This node also falls within the new
1059 * prefixlen. Merge the to-be-reinserted
1060 * node and this one.
1061 */
1062 xfrm_policy_inexact_node_merge(net, node,
1063 cached, family);
1064 kfree_rcu(node, rcu);
1065 }
1066
1067 /* restart */
1068 p = &root->rb_node;
1069 parent = NULL;
1070 }
1071 }
1072
1073 node = cached;
1074 if (!node) {
1075 node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
1076 if (!node)
1077 return NULL;
1078 }
1079
1080 rb_link_node_rcu(&node->node, parent, p);
1081 rb_insert_color(&node->node, root);
1082
1083 return node;
1084 }
1085
xfrm_policy_inexact_gc_tree(struct rb_root * r,bool rm)1086 static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
1087 {
1088 struct xfrm_pol_inexact_node *node;
1089 struct rb_node *rn = rb_first(r);
1090
1091 while (rn) {
1092 node = rb_entry(rn, struct xfrm_pol_inexact_node, node);
1093
1094 xfrm_policy_inexact_gc_tree(&node->root, rm);
1095 rn = rb_next(rn);
1096
1097 if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
1098 WARN_ON_ONCE(rm);
1099 continue;
1100 }
1101
1102 rb_erase(&node->node, r);
1103 kfree_rcu(node, rcu);
1104 }
1105 }
1106
__xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin * b,bool net_exit)1107 static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
1108 {
1109 write_seqcount_begin(&b->count);
1110 xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
1111 xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
1112 write_seqcount_end(&b->count);
1113
1114 if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
1115 !hlist_empty(&b->hhead)) {
1116 WARN_ON_ONCE(net_exit);
1117 return;
1118 }
1119
1120 if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
1121 xfrm_pol_inexact_params) == 0) {
1122 list_del(&b->inexact_bins);
1123 kfree_rcu(b, rcu);
1124 }
1125 }
1126
xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin * b)1127 static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
1128 {
1129 struct net *net = read_pnet(&b->k.net);
1130
1131 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1132 __xfrm_policy_inexact_prune_bin(b, false);
1133 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1134 }
1135
__xfrm_policy_inexact_flush(struct net * net)1136 static void __xfrm_policy_inexact_flush(struct net *net)
1137 {
1138 struct xfrm_pol_inexact_bin *bin, *t;
1139
1140 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1141
1142 list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
1143 __xfrm_policy_inexact_prune_bin(bin, false);
1144 }
1145
1146 static struct hlist_head *
xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin * bin,struct xfrm_policy * policy,u8 dir)1147 xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
1148 struct xfrm_policy *policy, u8 dir)
1149 {
1150 struct xfrm_pol_inexact_node *n;
1151 struct net *net;
1152
1153 net = xp_net(policy);
1154 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1155
1156 if (xfrm_policy_inexact_insert_use_any_list(policy))
1157 return &bin->hhead;
1158
1159 if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
1160 policy->family,
1161 policy->selector.prefixlen_d)) {
1162 write_seqcount_begin(&bin->count);
1163 n = xfrm_policy_inexact_insert_node(net,
1164 &bin->root_s,
1165 &policy->selector.saddr,
1166 policy->family,
1167 policy->selector.prefixlen_s,
1168 dir);
1169 write_seqcount_end(&bin->count);
1170 if (!n)
1171 return NULL;
1172
1173 return &n->hhead;
1174 }
1175
1176 /* daddr is fixed */
1177 write_seqcount_begin(&bin->count);
1178 n = xfrm_policy_inexact_insert_node(net,
1179 &bin->root_d,
1180 &policy->selector.daddr,
1181 policy->family,
1182 policy->selector.prefixlen_d, dir);
1183 write_seqcount_end(&bin->count);
1184 if (!n)
1185 return NULL;
1186
1187 /* saddr is wildcard */
1188 if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
1189 policy->family,
1190 policy->selector.prefixlen_s))
1191 return &n->hhead;
1192
1193 write_seqcount_begin(&bin->count);
1194 n = xfrm_policy_inexact_insert_node(net,
1195 &n->root,
1196 &policy->selector.saddr,
1197 policy->family,
1198 policy->selector.prefixlen_s, dir);
1199 write_seqcount_end(&bin->count);
1200 if (!n)
1201 return NULL;
1202
1203 return &n->hhead;
1204 }
1205
1206 static struct xfrm_policy *
xfrm_policy_inexact_insert(struct xfrm_policy * policy,u8 dir,int excl)1207 xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
1208 {
1209 struct xfrm_pol_inexact_bin *bin;
1210 struct xfrm_policy *delpol;
1211 struct hlist_head *chain;
1212 struct net *net;
1213
1214 bin = xfrm_policy_inexact_alloc_bin(policy, dir);
1215 if (!bin)
1216 return ERR_PTR(-ENOMEM);
1217
1218 net = xp_net(policy);
1219 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
1220
1221 chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
1222 if (!chain) {
1223 __xfrm_policy_inexact_prune_bin(bin, false);
1224 return ERR_PTR(-ENOMEM);
1225 }
1226
1227 delpol = xfrm_policy_insert_list(chain, policy, excl);
1228 if (delpol && excl) {
1229 __xfrm_policy_inexact_prune_bin(bin, false);
1230 return ERR_PTR(-EEXIST);
1231 }
1232
1233 if (delpol)
1234 __xfrm_policy_inexact_prune_bin(bin, false);
1235
1236 return delpol;
1237 }
1238
xfrm_policy_is_dead_or_sk(const struct xfrm_policy * policy)1239 static bool xfrm_policy_is_dead_or_sk(const struct xfrm_policy *policy)
1240 {
1241 int dir;
1242
1243 if (policy->walk.dead)
1244 return true;
1245
1246 dir = xfrm_policy_id2dir(policy->index);
1247 return dir >= XFRM_POLICY_MAX;
1248 }
1249
xfrm_hash_rebuild(struct work_struct * work)1250 static void xfrm_hash_rebuild(struct work_struct *work)
1251 {
1252 struct net *net = container_of(work, struct net,
1253 xfrm.policy_hthresh.work);
1254 struct xfrm_policy *pol;
1255 struct xfrm_policy *policy;
1256 struct hlist_head *chain;
1257 struct hlist_node *newpos;
1258 int dir;
1259 unsigned seq;
1260 u8 lbits4, rbits4, lbits6, rbits6;
1261
1262 mutex_lock(&hash_resize_mutex);
1263
1264 /* read selector prefixlen thresholds */
1265 do {
1266 seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);
1267
1268 lbits4 = net->xfrm.policy_hthresh.lbits4;
1269 rbits4 = net->xfrm.policy_hthresh.rbits4;
1270 lbits6 = net->xfrm.policy_hthresh.lbits6;
1271 rbits6 = net->xfrm.policy_hthresh.rbits6;
1272 } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));
1273
1274 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1275 write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
1276
1277 /* make sure that we can insert the indirect policies again before
1278 * we start with destructive action.
1279 */
1280 list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
1281 struct xfrm_pol_inexact_bin *bin;
1282 u8 dbits, sbits;
1283
1284 if (xfrm_policy_is_dead_or_sk(policy))
1285 continue;
1286
1287 dir = xfrm_policy_id2dir(policy->index);
1288 if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
1289 if (policy->family == AF_INET) {
1290 dbits = rbits4;
1291 sbits = lbits4;
1292 } else {
1293 dbits = rbits6;
1294 sbits = lbits6;
1295 }
1296 } else {
1297 if (policy->family == AF_INET) {
1298 dbits = lbits4;
1299 sbits = rbits4;
1300 } else {
1301 dbits = lbits6;
1302 sbits = rbits6;
1303 }
1304 }
1305
1306 if (policy->selector.prefixlen_d < dbits ||
1307 policy->selector.prefixlen_s < sbits)
1308 continue;
1309
1310 bin = xfrm_policy_inexact_alloc_bin(policy, dir);
1311 if (!bin)
1312 goto out_unlock;
1313
1314 if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
1315 goto out_unlock;
1316 }
1317
1318 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
1319 if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
1320 /* dir out => dst = remote, src = local */
1321 net->xfrm.policy_bydst[dir].dbits4 = rbits4;
1322 net->xfrm.policy_bydst[dir].sbits4 = lbits4;
1323 net->xfrm.policy_bydst[dir].dbits6 = rbits6;
1324 net->xfrm.policy_bydst[dir].sbits6 = lbits6;
1325 } else {
1326 /* dir in/fwd => dst = local, src = remote */
1327 net->xfrm.policy_bydst[dir].dbits4 = lbits4;
1328 net->xfrm.policy_bydst[dir].sbits4 = rbits4;
1329 net->xfrm.policy_bydst[dir].dbits6 = lbits6;
1330 net->xfrm.policy_bydst[dir].sbits6 = rbits6;
1331 }
1332 }
1333
1334 /* re-insert all policies by order of creation */
1335 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
1336 if (xfrm_policy_is_dead_or_sk(policy))
1337 continue;
1338
1339 hlist_del_rcu(&policy->bydst);
1340
1341 newpos = NULL;
1342 dir = xfrm_policy_id2dir(policy->index);
1343 chain = policy_hash_bysel(net, &policy->selector,
1344 policy->family, dir);
1345
1346 if (!chain) {
1347 void *p = xfrm_policy_inexact_insert(policy, dir, 0);
1348
1349 WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
1350 continue;
1351 }
1352
1353 hlist_for_each_entry(pol, chain, bydst) {
1354 if (policy->priority >= pol->priority)
1355 newpos = &pol->bydst;
1356 else
1357 break;
1358 }
1359 if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
1360 hlist_add_behind_rcu(&policy->bydst, newpos);
1361 else
1362 hlist_add_head_rcu(&policy->bydst, chain);
1363 }
1364
1365 out_unlock:
1366 __xfrm_policy_inexact_flush(net);
1367 write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
1368 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1369
1370 mutex_unlock(&hash_resize_mutex);
1371 }
1372
xfrm_policy_hash_rebuild(struct net * net)1373 void xfrm_policy_hash_rebuild(struct net *net)
1374 {
1375 schedule_work(&net->xfrm.policy_hthresh.work);
1376 }
1377 EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
1378
1379 /* Generate new index... KAME seems to generate them ordered by cost
1380 * of an absolute inpredictability of ordering of rules. This will not pass. */
xfrm_gen_index(struct net * net,int dir,u32 index)1381 static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
1382 {
1383 for (;;) {
1384 struct hlist_head *list;
1385 struct xfrm_policy *p;
1386 u32 idx;
1387 int found;
1388
1389 if (!index) {
1390 idx = (net->xfrm.idx_generator | dir);
1391 net->xfrm.idx_generator += 8;
1392 } else {
1393 idx = index;
1394 index = 0;
1395 }
1396
1397 if (idx == 0)
1398 idx = 8;
1399 list = net->xfrm.policy_byidx + idx_hash(net, idx);
1400 found = 0;
1401 hlist_for_each_entry(p, list, byidx) {
1402 if (p->index == idx) {
1403 found = 1;
1404 break;
1405 }
1406 }
1407 if (!found)
1408 return idx;
1409 }
1410 }
1411
selector_cmp(struct xfrm_selector * s1,struct xfrm_selector * s2)1412 static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
1413 {
1414 u32 *p1 = (u32 *) s1;
1415 u32 *p2 = (u32 *) s2;
1416 int len = sizeof(struct xfrm_selector) / sizeof(u32);
1417 int i;
1418
1419 for (i = 0; i < len; i++) {
1420 if (p1[i] != p2[i])
1421 return 1;
1422 }
1423
1424 return 0;
1425 }
1426
xfrm_policy_requeue(struct xfrm_policy * old,struct xfrm_policy * new)1427 static void xfrm_policy_requeue(struct xfrm_policy *old,
1428 struct xfrm_policy *new)
1429 {
1430 struct xfrm_policy_queue *pq = &old->polq;
1431 struct sk_buff_head list;
1432
1433 if (skb_queue_empty(&pq->hold_queue))
1434 return;
1435
1436 __skb_queue_head_init(&list);
1437
1438 spin_lock_bh(&pq->hold_queue.lock);
1439 skb_queue_splice_init(&pq->hold_queue, &list);
1440 if (del_timer(&pq->hold_timer))
1441 xfrm_pol_put(old);
1442 spin_unlock_bh(&pq->hold_queue.lock);
1443
1444 pq = &new->polq;
1445
1446 spin_lock_bh(&pq->hold_queue.lock);
1447 skb_queue_splice(&list, &pq->hold_queue);
1448 pq->timeout = XFRM_QUEUE_TMO_MIN;
1449 if (!mod_timer(&pq->hold_timer, jiffies))
1450 xfrm_pol_hold(new);
1451 spin_unlock_bh(&pq->hold_queue.lock);
1452 }
1453
xfrm_policy_mark_match(const struct xfrm_mark * mark,struct xfrm_policy * pol)1454 static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark,
1455 struct xfrm_policy *pol)
1456 {
1457 return mark->v == pol->mark.v && mark->m == pol->mark.m;
1458 }
1459
xfrm_pol_bin_key(const void * data,u32 len,u32 seed)1460 static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
1461 {
1462 const struct xfrm_pol_inexact_key *k = data;
1463 u32 a = k->type << 24 | k->dir << 16 | k->family;
1464
1465 return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
1466 seed);
1467 }
1468
xfrm_pol_bin_obj(const void * data,u32 len,u32 seed)1469 static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
1470 {
1471 const struct xfrm_pol_inexact_bin *b = data;
1472
1473 return xfrm_pol_bin_key(&b->k, 0, seed);
1474 }
1475
xfrm_pol_bin_cmp(struct rhashtable_compare_arg * arg,const void * ptr)1476 static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
1477 const void *ptr)
1478 {
1479 const struct xfrm_pol_inexact_key *key = arg->key;
1480 const struct xfrm_pol_inexact_bin *b = ptr;
1481 int ret;
1482
1483 if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
1484 return -1;
1485
1486 ret = b->k.dir ^ key->dir;
1487 if (ret)
1488 return ret;
1489
1490 ret = b->k.type ^ key->type;
1491 if (ret)
1492 return ret;
1493
1494 ret = b->k.family ^ key->family;
1495 if (ret)
1496 return ret;
1497
1498 return b->k.if_id ^ key->if_id;
1499 }
1500
1501 static const struct rhashtable_params xfrm_pol_inexact_params = {
1502 .head_offset = offsetof(struct xfrm_pol_inexact_bin, head),
1503 .hashfn = xfrm_pol_bin_key,
1504 .obj_hashfn = xfrm_pol_bin_obj,
1505 .obj_cmpfn = xfrm_pol_bin_cmp,
1506 .automatic_shrinking = true,
1507 };
1508
xfrm_policy_insert_list(struct hlist_head * chain,struct xfrm_policy * policy,bool excl)1509 static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
1510 struct xfrm_policy *policy,
1511 bool excl)
1512 {
1513 struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;
1514
1515 hlist_for_each_entry(pol, chain, bydst) {
1516 if (pol->type == policy->type &&
1517 pol->if_id == policy->if_id &&
1518 !selector_cmp(&pol->selector, &policy->selector) &&
1519 xfrm_policy_mark_match(&policy->mark, pol) &&
1520 xfrm_sec_ctx_match(pol->security, policy->security) &&
1521 !WARN_ON(delpol)) {
1522 if (excl)
1523 return ERR_PTR(-EEXIST);
1524 delpol = pol;
1525 if (policy->priority > pol->priority)
1526 continue;
1527 } else if (policy->priority >= pol->priority) {
1528 newpos = pol;
1529 continue;
1530 }
1531 if (delpol)
1532 break;
1533 }
1534
1535 if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
1536 hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
1537 else
1538 /* Packet offload policies enter to the head
1539 * to speed-up lookups.
1540 */
1541 hlist_add_head_rcu(&policy->bydst, chain);
1542
1543 return delpol;
1544 }
1545
xfrm_policy_insert(int dir,struct xfrm_policy * policy,int excl)1546 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
1547 {
1548 struct net *net = xp_net(policy);
1549 struct xfrm_policy *delpol;
1550 struct hlist_head *chain;
1551
1552 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1553 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
1554 if (chain)
1555 delpol = xfrm_policy_insert_list(chain, policy, excl);
1556 else
1557 delpol = xfrm_policy_inexact_insert(policy, dir, excl);
1558
1559 if (IS_ERR(delpol)) {
1560 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1561 return PTR_ERR(delpol);
1562 }
1563
1564 __xfrm_policy_link(policy, dir);
1565
1566 /* After previous checking, family can either be AF_INET or AF_INET6 */
1567 if (policy->family == AF_INET)
1568 rt_genid_bump_ipv4(net);
1569 else
1570 rt_genid_bump_ipv6(net);
1571
1572 if (delpol) {
1573 xfrm_policy_requeue(delpol, policy);
1574 __xfrm_policy_unlink(delpol, dir);
1575 }
1576 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
1577 hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
1578 policy->curlft.add_time = ktime_get_real_seconds();
1579 policy->curlft.use_time = 0;
1580 if (!mod_timer(&policy->timer, jiffies + HZ))
1581 xfrm_pol_hold(policy);
1582 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1583
1584 if (delpol)
1585 xfrm_policy_kill(delpol);
1586 else if (xfrm_bydst_should_resize(net, dir, NULL))
1587 schedule_work(&net->xfrm.policy_hash_work);
1588
1589 return 0;
1590 }
1591 EXPORT_SYMBOL(xfrm_policy_insert);
1592
1593 static struct xfrm_policy *
__xfrm_policy_bysel_ctx(struct hlist_head * chain,const struct xfrm_mark * mark,u32 if_id,u8 type,int dir,struct xfrm_selector * sel,struct xfrm_sec_ctx * ctx)1594 __xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark,
1595 u32 if_id, u8 type, int dir, struct xfrm_selector *sel,
1596 struct xfrm_sec_ctx *ctx)
1597 {
1598 struct xfrm_policy *pol;
1599
1600 if (!chain)
1601 return NULL;
1602
1603 hlist_for_each_entry(pol, chain, bydst) {
1604 if (pol->type == type &&
1605 pol->if_id == if_id &&
1606 xfrm_policy_mark_match(mark, pol) &&
1607 !selector_cmp(sel, &pol->selector) &&
1608 xfrm_sec_ctx_match(ctx, pol->security))
1609 return pol;
1610 }
1611
1612 return NULL;
1613 }
1614
1615 struct xfrm_policy *
xfrm_policy_bysel_ctx(struct net * net,const struct xfrm_mark * mark,u32 if_id,u8 type,int dir,struct xfrm_selector * sel,struct xfrm_sec_ctx * ctx,int delete,int * err)1616 xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id,
1617 u8 type, int dir, struct xfrm_selector *sel,
1618 struct xfrm_sec_ctx *ctx, int delete, int *err)
1619 {
1620 struct xfrm_pol_inexact_bin *bin = NULL;
1621 struct xfrm_policy *pol, *ret = NULL;
1622 struct hlist_head *chain;
1623
1624 *err = 0;
1625 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1626 chain = policy_hash_bysel(net, sel, sel->family, dir);
1627 if (!chain) {
1628 struct xfrm_pol_inexact_candidates cand;
1629 int i;
1630
1631 bin = xfrm_policy_inexact_lookup(net, type,
1632 sel->family, dir, if_id);
1633 if (!bin) {
1634 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1635 return NULL;
1636 }
1637
1638 if (!xfrm_policy_find_inexact_candidates(&cand, bin,
1639 &sel->saddr,
1640 &sel->daddr)) {
1641 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1642 return NULL;
1643 }
1644
1645 pol = NULL;
1646 for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
1647 struct xfrm_policy *tmp;
1648
1649 tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
1650 if_id, type, dir,
1651 sel, ctx);
1652 if (!tmp)
1653 continue;
1654
1655 if (!pol || tmp->pos < pol->pos)
1656 pol = tmp;
1657 }
1658 } else {
1659 pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
1660 sel, ctx);
1661 }
1662
1663 if (pol) {
1664 xfrm_pol_hold(pol);
1665 if (delete) {
1666 *err = security_xfrm_policy_delete(pol->security);
1667 if (*err) {
1668 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1669 return pol;
1670 }
1671 __xfrm_policy_unlink(pol, dir);
1672 }
1673 ret = pol;
1674 }
1675 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1676
1677 if (ret && delete)
1678 xfrm_policy_kill(ret);
1679 if (bin && delete)
1680 xfrm_policy_inexact_prune_bin(bin);
1681 return ret;
1682 }
1683 EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
1684
1685 struct xfrm_policy *
xfrm_policy_byid(struct net * net,const struct xfrm_mark * mark,u32 if_id,u8 type,int dir,u32 id,int delete,int * err)1686 xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id,
1687 u8 type, int dir, u32 id, int delete, int *err)
1688 {
1689 struct xfrm_policy *pol, *ret;
1690 struct hlist_head *chain;
1691
1692 *err = -ENOENT;
1693 if (xfrm_policy_id2dir(id) != dir)
1694 return NULL;
1695
1696 *err = 0;
1697 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1698 chain = net->xfrm.policy_byidx + idx_hash(net, id);
1699 ret = NULL;
1700 hlist_for_each_entry(pol, chain, byidx) {
1701 if (pol->type == type && pol->index == id &&
1702 pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) {
1703 xfrm_pol_hold(pol);
1704 if (delete) {
1705 *err = security_xfrm_policy_delete(
1706 pol->security);
1707 if (*err) {
1708 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1709 return pol;
1710 }
1711 __xfrm_policy_unlink(pol, dir);
1712 }
1713 ret = pol;
1714 break;
1715 }
1716 }
1717 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1718
1719 if (ret && delete)
1720 xfrm_policy_kill(ret);
1721 return ret;
1722 }
1723 EXPORT_SYMBOL(xfrm_policy_byid);
1724
1725 #ifdef CONFIG_SECURITY_NETWORK_XFRM
1726 static inline int
xfrm_policy_flush_secctx_check(struct net * net,u8 type,bool task_valid)1727 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1728 {
1729 struct xfrm_policy *pol;
1730 int err = 0;
1731
1732 list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1733 if (pol->walk.dead ||
1734 xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
1735 pol->type != type)
1736 continue;
1737
1738 err = security_xfrm_policy_delete(pol->security);
1739 if (err) {
1740 xfrm_audit_policy_delete(pol, 0, task_valid);
1741 return err;
1742 }
1743 }
1744 return err;
1745 }
1746
xfrm_dev_policy_flush_secctx_check(struct net * net,struct net_device * dev,bool task_valid)1747 static inline int xfrm_dev_policy_flush_secctx_check(struct net *net,
1748 struct net_device *dev,
1749 bool task_valid)
1750 {
1751 struct xfrm_policy *pol;
1752 int err = 0;
1753
1754 list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1755 if (pol->walk.dead ||
1756 xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
1757 pol->xdo.dev != dev)
1758 continue;
1759
1760 err = security_xfrm_policy_delete(pol->security);
1761 if (err) {
1762 xfrm_audit_policy_delete(pol, 0, task_valid);
1763 return err;
1764 }
1765 }
1766 return err;
1767 }
1768 #else
1769 static inline int
xfrm_policy_flush_secctx_check(struct net * net,u8 type,bool task_valid)1770 xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
1771 {
1772 return 0;
1773 }
1774
xfrm_dev_policy_flush_secctx_check(struct net * net,struct net_device * dev,bool task_valid)1775 static inline int xfrm_dev_policy_flush_secctx_check(struct net *net,
1776 struct net_device *dev,
1777 bool task_valid)
1778 {
1779 return 0;
1780 }
1781 #endif
1782
xfrm_policy_flush(struct net * net,u8 type,bool task_valid)1783 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
1784 {
1785 int dir, err = 0, cnt = 0;
1786 struct xfrm_policy *pol;
1787
1788 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1789
1790 err = xfrm_policy_flush_secctx_check(net, type, task_valid);
1791 if (err)
1792 goto out;
1793
1794 again:
1795 list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1796 if (pol->walk.dead)
1797 continue;
1798
1799 dir = xfrm_policy_id2dir(pol->index);
1800 if (dir >= XFRM_POLICY_MAX ||
1801 pol->type != type)
1802 continue;
1803
1804 __xfrm_policy_unlink(pol, dir);
1805 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1806 cnt++;
1807 xfrm_audit_policy_delete(pol, 1, task_valid);
1808 xfrm_policy_kill(pol);
1809 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1810 goto again;
1811 }
1812 if (cnt)
1813 __xfrm_policy_inexact_flush(net);
1814 else
1815 err = -ESRCH;
1816 out:
1817 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1818 return err;
1819 }
1820 EXPORT_SYMBOL(xfrm_policy_flush);
1821
xfrm_dev_policy_flush(struct net * net,struct net_device * dev,bool task_valid)1822 int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
1823 bool task_valid)
1824 {
1825 int dir, err = 0, cnt = 0;
1826 struct xfrm_policy *pol;
1827
1828 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1829
1830 err = xfrm_dev_policy_flush_secctx_check(net, dev, task_valid);
1831 if (err)
1832 goto out;
1833
1834 again:
1835 list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
1836 if (pol->walk.dead)
1837 continue;
1838
1839 dir = xfrm_policy_id2dir(pol->index);
1840 if (dir >= XFRM_POLICY_MAX ||
1841 pol->xdo.dev != dev)
1842 continue;
1843
1844 __xfrm_policy_unlink(pol, dir);
1845 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1846 cnt++;
1847 xfrm_audit_policy_delete(pol, 1, task_valid);
1848 xfrm_policy_kill(pol);
1849 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1850 goto again;
1851 }
1852 if (cnt)
1853 __xfrm_policy_inexact_flush(net);
1854 else
1855 err = -ESRCH;
1856 out:
1857 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1858 return err;
1859 }
1860 EXPORT_SYMBOL(xfrm_dev_policy_flush);
1861
xfrm_policy_walk(struct net * net,struct xfrm_policy_walk * walk,int (* func)(struct xfrm_policy *,int,int,void *),void * data)1862 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1863 int (*func)(struct xfrm_policy *, int, int, void*),
1864 void *data)
1865 {
1866 struct xfrm_policy *pol;
1867 struct xfrm_policy_walk_entry *x;
1868 int error = 0;
1869
1870 if (walk->type >= XFRM_POLICY_TYPE_MAX &&
1871 walk->type != XFRM_POLICY_TYPE_ANY)
1872 return -EINVAL;
1873
1874 if (list_empty(&walk->walk.all) && walk->seq != 0)
1875 return 0;
1876
1877 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1878 if (list_empty(&walk->walk.all))
1879 x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
1880 else
1881 x = list_first_entry(&walk->walk.all,
1882 struct xfrm_policy_walk_entry, all);
1883
1884 list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
1885 if (x->dead)
1886 continue;
1887 pol = container_of(x, struct xfrm_policy, walk);
1888 if (walk->type != XFRM_POLICY_TYPE_ANY &&
1889 walk->type != pol->type)
1890 continue;
1891 error = func(pol, xfrm_policy_id2dir(pol->index),
1892 walk->seq, data);
1893 if (error) {
1894 list_move_tail(&walk->walk.all, &x->all);
1895 goto out;
1896 }
1897 walk->seq++;
1898 }
1899 if (walk->seq == 0) {
1900 error = -ENOENT;
1901 goto out;
1902 }
1903 list_del_init(&walk->walk.all);
1904 out:
1905 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1906 return error;
1907 }
1908 EXPORT_SYMBOL(xfrm_policy_walk);
1909
1910 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
1911 {
1912 INIT_LIST_HEAD(&walk->walk.all);
1913 walk->walk.dead = 1;
1914 walk->type = type;
1915 walk->seq = 0;
1916 }
1917 EXPORT_SYMBOL(xfrm_policy_walk_init);
1918
1919 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
1920 {
1921 if (list_empty(&walk->walk.all))
1922 return;
1923
1924 	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
1925 list_del(&walk->walk.all);
1926 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
1927 }
1928 EXPORT_SYMBOL(xfrm_policy_walk_done);
1929
1930 /*
1931 * Find policy to apply to this flow.
1932 *
1933 * Returns 0 if policy found, else an -errno.
1934 */
1935 static int xfrm_policy_match(const struct xfrm_policy *pol,
1936 const struct flowi *fl,
1937 u8 type, u16 family, u32 if_id)
1938 {
1939 const struct xfrm_selector *sel = &pol->selector;
1940 int ret = -ESRCH;
1941 bool match;
1942
1943 if (pol->family != family ||
1944 pol->if_id != if_id ||
1945 (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
1946 pol->type != type)
1947 return ret;
1948
1949 match = xfrm_selector_match(sel, fl, family);
1950 if (match)
1951 ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid);
1952 return ret;
1953 }
1954
1955 static struct xfrm_pol_inexact_node *
1956 xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
1957 seqcount_spinlock_t *count,
1958 const xfrm_address_t *addr, u16 family)
1959 {
1960 const struct rb_node *parent;
1961 int seq;
1962
1963 again:
1964 seq = read_seqcount_begin(count);
1965
1966 parent = rcu_dereference_raw(r->rb_node);
1967 while (parent) {
1968 struct xfrm_pol_inexact_node *node;
1969 int delta;
1970
1971 node = rb_entry(parent, struct xfrm_pol_inexact_node, node);
1972
1973 delta = xfrm_policy_addr_delta(addr, &node->addr,
1974 node->prefixlen, family);
1975 if (delta < 0) {
1976 parent = rcu_dereference_raw(parent->rb_left);
1977 continue;
1978 } else if (delta > 0) {
1979 parent = rcu_dereference_raw(parent->rb_right);
1980 continue;
1981 }
1982
1983 return node;
1984 }
1985
1986 if (read_seqcount_retry(count, seq))
1987 goto again;
1988
1989 return NULL;
1990 }
1991
1992 static bool
1993 xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
1994 struct xfrm_pol_inexact_bin *b,
1995 const xfrm_address_t *saddr,
1996 const xfrm_address_t *daddr)
1997 {
1998 struct xfrm_pol_inexact_node *n;
1999 u16 family;
2000
2001 if (!b)
2002 return false;
2003
2004 family = b->k.family;
2005 memset(cand, 0, sizeof(*cand));
2006 cand->res[XFRM_POL_CAND_ANY] = &b->hhead;
2007
2008 n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
2009 family);
2010 if (n) {
2011 cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
2012 n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
2013 family);
2014 if (n)
2015 cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
2016 }
2017
2018 n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
2019 family);
2020 if (n)
2021 cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;
2022
2023 return true;
2024 }
2025
2026 static struct xfrm_pol_inexact_bin *
2027 xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
2028 u8 dir, u32 if_id)
2029 {
2030 struct xfrm_pol_inexact_key k = {
2031 .family = family,
2032 .type = type,
2033 .dir = dir,
2034 .if_id = if_id,
2035 };
2036
2037 write_pnet(&k.net, net);
2038
2039 return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
2040 xfrm_pol_inexact_params);
2041 }
2042
2043 static struct xfrm_pol_inexact_bin *
2044 xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
2045 u8 dir, u32 if_id)
2046 {
2047 struct xfrm_pol_inexact_bin *bin;
2048
2049 lockdep_assert_held(&net->xfrm.xfrm_policy_lock);
2050
2051 rcu_read_lock();
2052 bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2053 rcu_read_unlock();
2054
2055 return bin;
2056 }
2057
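/*
 * Brief note on the helper below: each inexact candidate chain is kept
 * ordered by ascending priority, so the walk can stop as soon as a
 * candidate's priority exceeds that of the current best ("prefer").
 * Priority ties are broken via the pos member, the lower (older) pos
 * winning; e.g. with prefer = {priority 10, pos 4}, a matching
 * candidate {priority 10, pos 7} is not preferred.
 */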
2058 static struct xfrm_policy *
2059 __xfrm_policy_eval_candidates(struct hlist_head *chain,
2060 struct xfrm_policy *prefer,
2061 const struct flowi *fl,
2062 u8 type, u16 family, u32 if_id)
2063 {
2064 u32 priority = prefer ? prefer->priority : ~0u;
2065 struct xfrm_policy *pol;
2066
2067 if (!chain)
2068 return NULL;
2069
2070 hlist_for_each_entry_rcu(pol, chain, bydst) {
2071 int err;
2072
2073 if (pol->priority > priority)
2074 break;
2075
2076 err = xfrm_policy_match(pol, fl, type, family, if_id);
2077 if (err) {
2078 if (err != -ESRCH)
2079 return ERR_PTR(err);
2080
2081 continue;
2082 }
2083
2084 if (prefer) {
2085 /* matches. Is it older than *prefer? */
2086 if (pol->priority == priority &&
2087 prefer->pos < pol->pos)
2088 return prefer;
2089 }
2090
2091 return pol;
2092 }
2093
2094 return NULL;
2095 }
2096
2097 static struct xfrm_policy *
2098 xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
2099 struct xfrm_policy *prefer,
2100 const struct flowi *fl,
2101 u8 type, u16 family, u32 if_id)
2102 {
2103 struct xfrm_policy *tmp;
2104 int i;
2105
2106 for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
2107 tmp = __xfrm_policy_eval_candidates(cand->res[i],
2108 prefer,
2109 fl, type, family, if_id);
2110 if (!tmp)
2111 continue;
2112
2113 if (IS_ERR(tmp))
2114 return tmp;
2115 prefer = tmp;
2116 }
2117
2118 return prefer;
2119 }
2120
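/*
 * Main per-type policy lookup: first scan the exact (direct) hash chain
 * for this daddr/saddr/family/dir, then - unless a packet-offload
 * policy already matched - evaluate the inexact candidate lists and
 * keep the best match. The whole walk runs under RCU; if a hash table
 * rebuild is detected via the seqcount, or the final refcount grab
 * fails, the lookup is retried from the top.
 */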
2121 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
2122 const struct flowi *fl,
2123 u16 family, u8 dir,
2124 u32 if_id)
2125 {
2126 struct xfrm_pol_inexact_candidates cand;
2127 const xfrm_address_t *daddr, *saddr;
2128 struct xfrm_pol_inexact_bin *bin;
2129 struct xfrm_policy *pol, *ret;
2130 struct hlist_head *chain;
2131 unsigned int sequence;
2132 int err;
2133
2134 daddr = xfrm_flowi_daddr(fl, family);
2135 saddr = xfrm_flowi_saddr(fl, family);
2136 if (unlikely(!daddr || !saddr))
2137 return NULL;
2138
2139 rcu_read_lock();
2140 retry:
2141 do {
2142 sequence = read_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);
2143 chain = policy_hash_direct(net, daddr, saddr, family, dir);
2144 } while (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence));
2145
2146 ret = NULL;
2147 hlist_for_each_entry_rcu(pol, chain, bydst) {
2148 err = xfrm_policy_match(pol, fl, type, family, if_id);
2149 if (err) {
2150 if (err == -ESRCH)
2151 continue;
2152 else {
2153 ret = ERR_PTR(err);
2154 goto fail;
2155 }
2156 } else {
2157 ret = pol;
2158 break;
2159 }
2160 }
2161 if (ret && ret->xdo.type == XFRM_DEV_OFFLOAD_PACKET)
2162 goto skip_inexact;
2163
2164 bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
2165 if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
2166 daddr))
2167 goto skip_inexact;
2168
2169 pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
2170 family, if_id);
2171 if (pol) {
2172 ret = pol;
2173 if (IS_ERR(pol))
2174 goto fail;
2175 }
2176
2177 skip_inexact:
2178 if (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence))
2179 goto retry;
2180
2181 if (ret && !xfrm_pol_hold_rcu(ret))
2182 goto retry;
2183 fail:
2184 rcu_read_unlock();
2185
2186 return ret;
2187 }
2188
2189 static struct xfrm_policy *xfrm_policy_lookup(struct net *net,
2190 const struct flowi *fl,
2191 u16 family, u8 dir, u32 if_id)
2192 {
2193 #ifdef CONFIG_XFRM_SUB_POLICY
2194 struct xfrm_policy *pol;
2195
2196 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family,
2197 dir, if_id);
2198 if (pol != NULL)
2199 return pol;
2200 #endif
2201 return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family,
2202 dir, if_id);
2203 }
2204
2205 static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
2206 const struct flowi *fl,
2207 u16 family, u32 if_id)
2208 {
2209 struct xfrm_policy *pol;
2210
2211 rcu_read_lock();
2212 again:
2213 pol = rcu_dereference(sk->sk_policy[dir]);
2214 if (pol != NULL) {
2215 bool match;
2216 int err = 0;
2217
2218 if (pol->family != family) {
2219 pol = NULL;
2220 goto out;
2221 }
2222
2223 match = xfrm_selector_match(&pol->selector, fl, family);
2224 if (match) {
2225 if ((READ_ONCE(sk->sk_mark) & pol->mark.m) != pol->mark.v ||
2226 pol->if_id != if_id) {
2227 pol = NULL;
2228 goto out;
2229 }
2230 err = security_xfrm_policy_lookup(pol->security,
2231 fl->flowi_secid);
2232 if (!err) {
2233 if (!xfrm_pol_hold_rcu(pol))
2234 goto again;
2235 } else if (err == -ESRCH) {
2236 pol = NULL;
2237 } else {
2238 pol = ERR_PTR(err);
2239 }
2240 } else
2241 pol = NULL;
2242 }
2243 out:
2244 rcu_read_unlock();
2245 return pol;
2246 }
2247
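/*
 * pos numbering used for inexact-lookup tie breaks. The slow path below
 * renumbers every live policy, walking the list oldest-first so that
 * older entries end up with smaller pos values; the fast path in
 * xfrm_gen_pos() simply takes the newest live policy's pos + 1 and only
 * falls back to a full renumbering when that counter would wrap
 * (pos == UINT_MAX).
 */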
2248 static u32 xfrm_gen_pos_slow(struct net *net)
2249 {
2250 struct xfrm_policy *policy;
2251 u32 i = 0;
2252
2253 /* oldest entry is last in list */
2254 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
2255 if (!xfrm_policy_is_dead_or_sk(policy))
2256 policy->pos = ++i;
2257 }
2258
2259 return i;
2260 }
2261
2262 static u32 xfrm_gen_pos(struct net *net)
2263 {
2264 const struct xfrm_policy *policy;
2265 u32 i = 0;
2266
2267 /* most recently added policy is at the head of the list */
2268 list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
2269 if (xfrm_policy_is_dead_or_sk(policy))
2270 continue;
2271
2272 if (policy->pos == UINT_MAX)
2273 return xfrm_gen_pos_slow(net);
2274
2275 i = policy->pos + 1;
2276 break;
2277 }
2278
2279 return i;
2280 }
2281
2282 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
2283 {
2284 struct net *net = xp_net(pol);
2285
2286 switch (dir) {
2287 case XFRM_POLICY_IN:
2288 case XFRM_POLICY_FWD:
2289 case XFRM_POLICY_OUT:
2290 pol->pos = xfrm_gen_pos(net);
2291 break;
2292 }
2293
2294 list_add(&pol->walk.all, &net->xfrm.policy_all);
2295 net->xfrm.policy_count[dir]++;
2296 xfrm_pol_hold(pol);
2297 }
2298
2299 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
2300 int dir)
2301 {
2302 struct net *net = xp_net(pol);
2303
2304 if (list_empty(&pol->walk.all))
2305 return NULL;
2306
2307 /* Socket policies are not hashed. */
2308 if (!hlist_unhashed(&pol->bydst)) {
2309 hlist_del_rcu(&pol->bydst);
2310 hlist_del(&pol->byidx);
2311 }
2312
2313 list_del_init(&pol->walk.all);
2314 net->xfrm.policy_count[dir]--;
2315
2316 return pol;
2317 }
2318
2319 static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
2320 {
2321 __xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
2322 }
2323
2324 static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
2325 {
2326 __xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
2327 }
2328
2329 int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
2330 {
2331 struct net *net = xp_net(pol);
2332
2333 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2334 pol = __xfrm_policy_unlink(pol, dir);
2335 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2336 if (pol) {
2337 xfrm_policy_kill(pol);
2338 return 0;
2339 }
2340 return -ENOENT;
2341 }
2342 EXPORT_SYMBOL(xfrm_policy_delete);
2343
2344 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
2345 {
2346 struct net *net = sock_net(sk);
2347 struct xfrm_policy *old_pol;
2348
2349 #ifdef CONFIG_XFRM_SUB_POLICY
2350 if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
2351 return -EINVAL;
2352 #endif
2353
2354 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2355 old_pol = rcu_dereference_protected(sk->sk_policy[dir],
2356 lockdep_is_held(&net->xfrm.xfrm_policy_lock));
2357 if (pol) {
2358 pol->curlft.add_time = ktime_get_real_seconds();
2359 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
2360 xfrm_sk_policy_link(pol, dir);
2361 }
2362 rcu_assign_pointer(sk->sk_policy[dir], pol);
2363 if (old_pol) {
2364 if (pol)
2365 xfrm_policy_requeue(old_pol, pol);
2366
2367 /* Unlinking succeeds always. This is the only function
2368 * allowed to delete or replace socket policy.
2369 */
2370 xfrm_sk_policy_unlink(old_pol, dir);
2371 }
2372 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2373
2374 if (old_pol) {
2375 xfrm_policy_kill(old_pol);
2376 }
2377 return 0;
2378 }
2379
2380 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
2381 {
2382 struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
2383 struct net *net = xp_net(old);
2384
2385 if (newp) {
2386 newp->selector = old->selector;
2387 if (security_xfrm_policy_clone(old->security,
2388 &newp->security)) {
2389 kfree(newp);
2390 return NULL; /* ENOMEM */
2391 }
2392 newp->lft = old->lft;
2393 newp->curlft = old->curlft;
2394 newp->mark = old->mark;
2395 newp->if_id = old->if_id;
2396 newp->action = old->action;
2397 newp->flags = old->flags;
2398 newp->xfrm_nr = old->xfrm_nr;
2399 newp->index = old->index;
2400 newp->type = old->type;
2401 newp->family = old->family;
2402 memcpy(newp->xfrm_vec, old->xfrm_vec,
2403 newp->xfrm_nr*sizeof(struct xfrm_tmpl));
2404 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
2405 xfrm_sk_policy_link(newp, dir);
2406 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
2407 xfrm_pol_put(newp);
2408 }
2409 return newp;
2410 }
2411
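/*
 * Copy both per-socket policies (IN and OUT) from osk to sk, typically
 * when a new socket is cloned from a listener. clone_policy() can fail
 * only on memory (or LSM clone) failure, in which case -ENOMEM is
 * returned and any policy already copied is left in place on the new
 * socket.
 */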
2412 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
2413 {
2414 const struct xfrm_policy *p;
2415 struct xfrm_policy *np;
2416 int i, ret = 0;
2417
2418 rcu_read_lock();
2419 for (i = 0; i < 2; i++) {
2420 p = rcu_dereference(osk->sk_policy[i]);
2421 if (p) {
2422 np = clone_policy(p, i);
2423 if (unlikely(!np)) {
2424 ret = -ENOMEM;
2425 break;
2426 }
2427 rcu_assign_pointer(sk->sk_policy[i], np);
2428 }
2429 }
2430 rcu_read_unlock();
2431 return ret;
2432 }
2433
2434 static int
2435 xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
2436 xfrm_address_t *remote, unsigned short family, u32 mark)
2437 {
2438 int err;
2439 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2440
2441 if (unlikely(afinfo == NULL))
2442 return -EINVAL;
2443 err = afinfo->get_saddr(net, oif, local, remote, mark);
2444 rcu_read_unlock();
2445 return err;
2446 }
2447
2448 /* Resolve list of templates for the flow, given policy. */
2449
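/*
 * For each template of the policy: for tunnel/BEET modes the template's
 * own addresses replace the flow addresses (a source address is looked
 * up if the template leaves it wildcarded), then a matching state is
 * acquired with xfrm_state_find(). Optional templates that cannot be
 * resolved are skipped; a mandatory one that cannot be resolved fails
 * the whole resolution and drops the states acquired so far.
 */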
2450 static int
2451 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
2452 struct xfrm_state **xfrm, unsigned short family)
2453 {
2454 struct net *net = xp_net(policy);
2455 int nx;
2456 int i, error;
2457 xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
2458 xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
2459 xfrm_address_t tmp;
2460
2461 for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
2462 struct xfrm_state *x;
2463 xfrm_address_t *remote = daddr;
2464 xfrm_address_t *local = saddr;
2465 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];
2466
2467 if (tmpl->mode == XFRM_MODE_TUNNEL ||
2468 tmpl->mode == XFRM_MODE_BEET) {
2469 remote = &tmpl->id.daddr;
2470 local = &tmpl->saddr;
2471 if (xfrm_addr_any(local, tmpl->encap_family)) {
2472 error = xfrm_get_saddr(net, fl->flowi_oif,
2473 &tmp, remote,
2474 tmpl->encap_family, 0);
2475 if (error)
2476 goto fail;
2477 local = &tmp;
2478 }
2479 }
2480
2481 x = xfrm_state_find(remote, local, fl, tmpl, policy, &error,
2482 family, policy->if_id);
2483 if (x && x->dir && x->dir != XFRM_SA_DIR_OUT) {
2484 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEDIRERROR);
2485 xfrm_state_put(x);
2486 error = -EINVAL;
2487 goto fail;
2488 }
2489
2490 if (x && x->km.state == XFRM_STATE_VALID) {
2491 xfrm[nx++] = x;
2492 daddr = remote;
2493 saddr = local;
2494 continue;
2495 }
2496 if (x) {
2497 error = (x->km.state == XFRM_STATE_ERROR ?
2498 -EINVAL : -EAGAIN);
2499 xfrm_state_put(x);
2500 } else if (error == -ESRCH) {
2501 error = -EAGAIN;
2502 }
2503
2504 if (!tmpl->optional)
2505 goto fail;
2506 }
2507 return nx;
2508
2509 fail:
2510 for (nx--; nx >= 0; nx--)
2511 xfrm_state_put(xfrm[nx]);
2512 return error;
2513 }
2514
2515 static int
2516 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
2517 struct xfrm_state **xfrm, unsigned short family)
2518 {
2519 struct xfrm_state *tp[XFRM_MAX_DEPTH];
2520 struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
2521 int cnx = 0;
2522 int error;
2523 int ret;
2524 int i;
2525
2526 for (i = 0; i < npols; i++) {
2527 if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
2528 error = -ENOBUFS;
2529 goto fail;
2530 }
2531
2532 ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
2533 if (ret < 0) {
2534 error = ret;
2535 goto fail;
2536 } else
2537 cnx += ret;
2538 }
2539
2540 /* found states are sorted for outbound processing */
2541 if (npols > 1)
2542 xfrm_state_sort(xfrm, tpp, cnx, family);
2543
2544 return cnx;
2545
2546 fail:
2547 for (cnx--; cnx >= 0; cnx--)
2548 xfrm_state_put(tpp[cnx]);
2549 return error;
2550
2551 }
2552
2553 static int xfrm_get_tos(const struct flowi *fl, int family)
2554 {
2555 if (family == AF_INET)
2556 return fl->u.ip4.flowi4_tos & INET_DSCP_MASK;
2557
2558 return 0;
2559 }
2560
2561 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
2562 {
2563 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2564 struct dst_ops *dst_ops;
2565 struct xfrm_dst *xdst;
2566
2567 if (!afinfo)
2568 return ERR_PTR(-EINVAL);
2569
2570 switch (family) {
2571 case AF_INET:
2572 dst_ops = &net->xfrm.xfrm4_dst_ops;
2573 break;
2574 #if IS_ENABLED(CONFIG_IPV6)
2575 case AF_INET6:
2576 dst_ops = &net->xfrm.xfrm6_dst_ops;
2577 break;
2578 #endif
2579 default:
2580 BUG();
2581 }
2582 xdst = dst_alloc(dst_ops, NULL, DST_OBSOLETE_NONE, 0);
2583
2584 if (likely(xdst)) {
2585 memset_after(xdst, 0, u.dst);
2586 } else
2587 xdst = ERR_PTR(-ENOBUFS);
2588
2589 rcu_read_unlock();
2590
2591 return xdst;
2592 }
2593
2594 static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
2595 int nfheader_len)
2596 {
2597 if (dst->ops->family == AF_INET6) {
2598 path->path_cookie = rt6_get_cookie(dst_rt6_info(dst));
2599 path->u.rt6.rt6i_nfheader_len = nfheader_len;
2600 }
2601 }
2602
2603 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
2604 const struct flowi *fl)
2605 {
2606 const struct xfrm_policy_afinfo *afinfo =
2607 xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
2608 int err;
2609
2610 if (!afinfo)
2611 return -EINVAL;
2612
2613 err = afinfo->fill_dst(xdst, dev, fl);
2614
2615 rcu_read_unlock();
2616
2617 return err;
2618 }
2619
2620
2621 /* Allocate chain of dst_entry's, attach known xfrm's, calculate
2622 * all the metrics... Shortly, bundle a bundle.
2623 */
2624
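/*
 * The bundle built below is a chain of xfrm_dst entries, one per
 * resolved state, linked via xfrm_dst_set_child() and terminated by the
 * plain route used to reach the outermost endpoint:
 *
 *	xdst0 -> xdst1 -> ... -> route dst
 *
 * xdst0->path points at that final route, and header/trailer lengths
 * are accumulated so that each level reserves enough room for all the
 * transformations nested inside it.
 */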
2625 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
2626 struct xfrm_state **xfrm,
2627 struct xfrm_dst **bundle,
2628 int nx,
2629 const struct flowi *fl,
2630 struct dst_entry *dst)
2631 {
2632 const struct xfrm_state_afinfo *afinfo;
2633 const struct xfrm_mode *inner_mode;
2634 struct net *net = xp_net(policy);
2635 unsigned long now = jiffies;
2636 struct net_device *dev;
2637 struct xfrm_dst *xdst_prev = NULL;
2638 struct xfrm_dst *xdst0 = NULL;
2639 int i = 0;
2640 int err;
2641 int header_len = 0;
2642 int nfheader_len = 0;
2643 int trailer_len = 0;
2644 int tos;
2645 int family = policy->selector.family;
2646 xfrm_address_t saddr, daddr;
2647
2648 xfrm_flowi_addr_get(fl, &saddr, &daddr, family);
2649
2650 tos = xfrm_get_tos(fl, family);
2651
2652 dst_hold(dst);
2653
2654 for (; i < nx; i++) {
2655 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
2656 struct dst_entry *dst1 = &xdst->u.dst;
2657
2658 err = PTR_ERR(xdst);
2659 if (IS_ERR(xdst)) {
2660 dst_release(dst);
2661 goto put_states;
2662 }
2663
2664 bundle[i] = xdst;
2665 if (!xdst_prev)
2666 xdst0 = xdst;
2667 else
2668 /* Ref count is taken during xfrm_alloc_dst()
2669 * No need to do dst_clone() on dst1
2670 */
2671 xfrm_dst_set_child(xdst_prev, &xdst->u.dst);
2672
2673 if (xfrm[i]->sel.family == AF_UNSPEC) {
2674 inner_mode = xfrm_ip2inner_mode(xfrm[i],
2675 xfrm_af2proto(family));
2676 if (!inner_mode) {
2677 err = -EAFNOSUPPORT;
2678 dst_release(dst);
2679 goto put_states;
2680 }
2681 } else
2682 inner_mode = &xfrm[i]->inner_mode;
2683
2684 xdst->route = dst;
2685 dst_copy_metrics(dst1, dst);
2686
2687 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
2688 __u32 mark = 0;
2689 int oif;
2690
2691 if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m)
2692 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]);
2693
2694 if (xfrm[i]->xso.type != XFRM_DEV_OFFLOAD_PACKET)
2695 family = xfrm[i]->props.family;
2696
2697 oif = fl->flowi_oif ? : fl->flowi_l3mdev;
2698 dst = xfrm_dst_lookup(xfrm[i], tos, oif,
2699 &saddr, &daddr, family, mark);
2700 err = PTR_ERR(dst);
2701 if (IS_ERR(dst))
2702 goto put_states;
2703 } else
2704 dst_hold(dst);
2705
2706 dst1->xfrm = xfrm[i];
2707 xdst->xfrm_genid = xfrm[i]->genid;
2708
2709 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
2710 dst1->lastuse = now;
2711
2712 dst1->input = dst_discard;
2713
2714 rcu_read_lock();
2715 afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family);
2716 if (likely(afinfo))
2717 dst1->output = afinfo->output;
2718 else
2719 dst1->output = dst_discard_out;
2720 rcu_read_unlock();
2721
2722 xdst_prev = xdst;
2723
2724 header_len += xfrm[i]->props.header_len;
2725 if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
2726 nfheader_len += xfrm[i]->props.header_len;
2727 trailer_len += xfrm[i]->props.trailer_len;
2728 }
2729
2730 xfrm_dst_set_child(xdst_prev, dst);
2731 xdst0->path = dst;
2732
2733 err = -ENODEV;
2734 dev = dst->dev;
2735 if (!dev)
2736 goto free_dst;
2737
2738 xfrm_init_path(xdst0, dst, nfheader_len);
2739 xfrm_init_pmtu(bundle, nx);
2740
2741 for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst;
2742 xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) {
2743 err = xfrm_fill_dst(xdst_prev, dev, fl);
2744 if (err)
2745 goto free_dst;
2746
2747 xdst_prev->u.dst.header_len = header_len;
2748 xdst_prev->u.dst.trailer_len = trailer_len;
2749 header_len -= xdst_prev->u.dst.xfrm->props.header_len;
2750 trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
2751 }
2752
2753 return &xdst0->u.dst;
2754
2755 put_states:
2756 for (; i < nx; i++)
2757 xfrm_state_put(xfrm[i]);
2758 free_dst:
2759 if (xdst0)
2760 dst_release_immediate(&xdst0->u.dst);
2761
2762 return ERR_PTR(err);
2763 }
2764
2765 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
2766 struct xfrm_policy **pols,
2767 int *num_pols, int *num_xfrms)
2768 {
2769 int i;
2770
2771 if (*num_pols == 0 || !pols[0]) {
2772 *num_pols = 0;
2773 *num_xfrms = 0;
2774 return 0;
2775 }
2776 if (IS_ERR(pols[0])) {
2777 *num_pols = 0;
2778 return PTR_ERR(pols[0]);
2779 }
2780
2781 *num_xfrms = pols[0]->xfrm_nr;
2782
2783 #ifdef CONFIG_XFRM_SUB_POLICY
2784 if (pols[0]->action == XFRM_POLICY_ALLOW &&
2785 pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
2786 pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
2787 XFRM_POLICY_TYPE_MAIN,
2788 fl, family,
2789 XFRM_POLICY_OUT,
2790 pols[0]->if_id);
2791 if (pols[1]) {
2792 if (IS_ERR(pols[1])) {
2793 xfrm_pols_put(pols, *num_pols);
2794 *num_pols = 0;
2795 return PTR_ERR(pols[1]);
2796 }
2797 (*num_pols)++;
2798 (*num_xfrms) += pols[1]->xfrm_nr;
2799 }
2800 }
2801 #endif
2802 for (i = 0; i < *num_pols; i++) {
2803 if (pols[i]->action != XFRM_POLICY_ALLOW) {
2804 *num_xfrms = -1;
2805 break;
2806 }
2807 }
2808
2809 return 0;
2810
2811 }
2812
2813 static struct xfrm_dst *
2814 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
2815 const struct flowi *fl, u16 family,
2816 struct dst_entry *dst_orig)
2817 {
2818 struct net *net = xp_net(pols[0]);
2819 struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
2820 struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
2821 struct xfrm_dst *xdst;
2822 struct dst_entry *dst;
2823 int err;
2824
2825 /* Try to instantiate a bundle */
2826 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
2827 if (err <= 0) {
2828 if (err == 0)
2829 return NULL;
2830
2831 if (err != -EAGAIN)
2832 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
2833 return ERR_PTR(err);
2834 }
2835
2836 dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig);
2837 if (IS_ERR(dst)) {
2838 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
2839 return ERR_CAST(dst);
2840 }
2841
2842 xdst = (struct xfrm_dst *)dst;
2843 xdst->num_xfrms = err;
2844 xdst->num_pols = num_pols;
2845 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
2846 xdst->policy_genid = atomic_read(&pols[0]->genid);
2847
2848 return xdst;
2849 }
2850
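/*
 * Timer handler for a policy's hold queue. Packets land on this queue
 * (via xdst_queue_output) while the needed states are still being
 * negotiated; each expiry re-resolves the bundle, and if it is still a
 * dummy (DST_XFRM_QUEUE) the timeout is doubled, bounded by
 * XFRM_QUEUE_TMO_MAX, after which the queue is purged. Once a real
 * bundle exists, the queued skbs are re-routed and transmitted.
 */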
2851 static void xfrm_policy_queue_process(struct timer_list *t)
2852 {
2853 struct sk_buff *skb;
2854 struct sock *sk;
2855 struct dst_entry *dst;
2856 struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer);
2857 struct net *net = xp_net(pol);
2858 struct xfrm_policy_queue *pq = &pol->polq;
2859 struct flowi fl;
2860 struct sk_buff_head list;
2861 __u32 skb_mark;
2862
2863 spin_lock(&pq->hold_queue.lock);
2864 skb = skb_peek(&pq->hold_queue);
2865 if (!skb) {
2866 spin_unlock(&pq->hold_queue.lock);
2867 goto out;
2868 }
2869 dst = skb_dst(skb);
2870 sk = skb->sk;
2871
2872 /* Fixup the mark to support VTI. */
2873 skb_mark = skb->mark;
2874 skb->mark = pol->mark.v;
2875 xfrm_decode_session(net, skb, &fl, dst->ops->family);
2876 skb->mark = skb_mark;
2877 spin_unlock(&pq->hold_queue.lock);
2878
2879 dst_hold(xfrm_dst_path(dst));
2880 dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE);
2881 if (IS_ERR(dst))
2882 goto purge_queue;
2883
2884 if (dst->flags & DST_XFRM_QUEUE) {
2885 dst_release(dst);
2886
2887 if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
2888 goto purge_queue;
2889
2890 pq->timeout = pq->timeout << 1;
2891 if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
2892 xfrm_pol_hold(pol);
2893 goto out;
2894 }
2895
2896 dst_release(dst);
2897
2898 __skb_queue_head_init(&list);
2899
2900 spin_lock(&pq->hold_queue.lock);
2901 pq->timeout = 0;
2902 skb_queue_splice_init(&pq->hold_queue, &list);
2903 spin_unlock(&pq->hold_queue.lock);
2904
2905 while (!skb_queue_empty(&list)) {
2906 skb = __skb_dequeue(&list);
2907
2908 /* Fixup the mark to support VTI. */
2909 skb_mark = skb->mark;
2910 skb->mark = pol->mark.v;
2911 xfrm_decode_session(net, skb, &fl, skb_dst(skb)->ops->family);
2912 skb->mark = skb_mark;
2913
2914 dst_hold(xfrm_dst_path(skb_dst(skb)));
2915 dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0);
2916 if (IS_ERR(dst)) {
2917 kfree_skb(skb);
2918 continue;
2919 }
2920
2921 nf_reset_ct(skb);
2922 skb_dst_drop(skb);
2923 skb_dst_set(skb, dst);
2924
2925 dst_output(net, skb->sk, skb);
2926 }
2927
2928 out:
2929 xfrm_pol_put(pol);
2930 return;
2931
2932 purge_queue:
2933 pq->timeout = 0;
2934 skb_queue_purge(&pq->hold_queue);
2935 xfrm_pol_put(pol);
2936 }
2937
2938 static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb)
2939 {
2940 unsigned long sched_next;
2941 struct dst_entry *dst = skb_dst(skb);
2942 struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
2943 struct xfrm_policy *pol = xdst->pols[0];
2944 struct xfrm_policy_queue *pq = &pol->polq;
2945
2946 if (unlikely(skb_fclone_busy(sk, skb))) {
2947 kfree_skb(skb);
2948 return 0;
2949 }
2950
2951 if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
2952 kfree_skb(skb);
2953 return -EAGAIN;
2954 }
2955
2956 skb_dst_force(skb);
2957
2958 spin_lock_bh(&pq->hold_queue.lock);
2959
2960 if (!pq->timeout)
2961 pq->timeout = XFRM_QUEUE_TMO_MIN;
2962
2963 sched_next = jiffies + pq->timeout;
2964
2965 if (del_timer(&pq->hold_timer)) {
2966 if (time_before(pq->hold_timer.expires, sched_next))
2967 sched_next = pq->hold_timer.expires;
2968 xfrm_pol_put(pol);
2969 }
2970
2971 __skb_queue_tail(&pq->hold_queue, skb);
2972 if (!mod_timer(&pq->hold_timer, sched_next))
2973 xfrm_pol_hold(pol);
2974
2975 spin_unlock_bh(&pq->hold_queue.lock);
2976
2977 return 0;
2978 }
2979
2980 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
2981 struct xfrm_flo *xflo,
2982 const struct flowi *fl,
2983 int num_xfrms,
2984 u16 family)
2985 {
2986 int err;
2987 struct net_device *dev;
2988 struct dst_entry *dst;
2989 struct dst_entry *dst1;
2990 struct xfrm_dst *xdst;
2991
2992 xdst = xfrm_alloc_dst(net, family);
2993 if (IS_ERR(xdst))
2994 return xdst;
2995
2996 if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
2997 net->xfrm.sysctl_larval_drop ||
2998 num_xfrms <= 0)
2999 return xdst;
3000
3001 dst = xflo->dst_orig;
3002 dst1 = &xdst->u.dst;
3003 dst_hold(dst);
3004 xdst->route = dst;
3005
3006 dst_copy_metrics(dst1, dst);
3007
3008 dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
3009 dst1->flags |= DST_XFRM_QUEUE;
3010 dst1->lastuse = jiffies;
3011
3012 dst1->input = dst_discard;
3013 dst1->output = xdst_queue_output;
3014
3015 dst_hold(dst);
3016 xfrm_dst_set_child(xdst, dst);
3017 xdst->path = dst;
3018
3019 xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);
3020
3021 err = -ENODEV;
3022 dev = dst->dev;
3023 if (!dev)
3024 goto free_dst;
3025
3026 err = xfrm_fill_dst(xdst, dev, fl);
3027 if (err)
3028 goto free_dst;
3029
3030 out:
3031 return xdst;
3032
3033 free_dst:
3034 dst_release(dst1);
3035 xdst = ERR_PTR(err);
3036 goto out;
3037 }
3038
3039 static struct xfrm_dst *xfrm_bundle_lookup(struct net *net,
3040 const struct flowi *fl,
3041 u16 family, u8 dir,
3042 struct xfrm_flo *xflo, u32 if_id)
3043 {
3044 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3045 int num_pols = 0, num_xfrms = 0, err;
3046 struct xfrm_dst *xdst;
3047
3048 /* Resolve policies to use if we couldn't get them from
3049 * previous cache entry */
3050 num_pols = 1;
3051 pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id);
3052 err = xfrm_expand_policies(fl, family, pols,
3053 &num_pols, &num_xfrms);
3054 if (err < 0)
3055 goto inc_error;
3056 if (num_pols == 0)
3057 return NULL;
3058 if (num_xfrms <= 0)
3059 goto make_dummy_bundle;
3060
3061 xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
3062 xflo->dst_orig);
3063 if (IS_ERR(xdst)) {
3064 err = PTR_ERR(xdst);
3065 if (err == -EREMOTE) {
3066 xfrm_pols_put(pols, num_pols);
3067 return NULL;
3068 }
3069
3070 if (err != -EAGAIN)
3071 goto error;
3072 goto make_dummy_bundle;
3073 } else if (xdst == NULL) {
3074 num_xfrms = 0;
3075 goto make_dummy_bundle;
3076 }
3077
3078 return xdst;
3079
3080 make_dummy_bundle:
3081 	/* We found policies, but there are no bundles to instantiate:
3082 	 * either the policy blocks, it has no transformations, or
3083 	 * we could not resolve the templates (no xfrm_states). */
3084 xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
3085 if (IS_ERR(xdst)) {
3086 xfrm_pols_put(pols, num_pols);
3087 return ERR_CAST(xdst);
3088 }
3089 xdst->num_pols = num_pols;
3090 xdst->num_xfrms = num_xfrms;
3091 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
3092
3093 return xdst;
3094
3095 inc_error:
3096 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
3097 error:
3098 xfrm_pols_put(pols, num_pols);
3099 return ERR_PTR(err);
3100 }
3101
3102 static struct dst_entry *make_blackhole(struct net *net, u16 family,
3103 struct dst_entry *dst_orig)
3104 {
3105 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
3106 struct dst_entry *ret;
3107
3108 if (!afinfo) {
3109 dst_release(dst_orig);
3110 return ERR_PTR(-EINVAL);
3111 } else {
3112 ret = afinfo->blackhole_route(net, dst_orig);
3113 }
3114 rcu_read_unlock();
3115
3116 return ret;
3117 }
3118
3119 /* Finds/creates a bundle for given flow and if_id
3120 *
3121 * At the moment we eat a raw IP route. Mostly to speed up lookups
3122 * on interfaces with disabled IPsec.
3123 *
3124 * xfrm_lookup uses an if_id of 0 by default, and is provided for
3125 * compatibility
3126 */
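/*
 * On success the return value is either dst_orig itself (flow passes
 * untransformed) or an xfrm bundle (xfrm_dst chain) on top of the real
 * route. Errors come back as ERR_PTR(); in particular -EREMOTE is used
 * when states are missing and sysctl_larval_drop asks for the packet to
 * be dropped (or blackholed by xfrm_lookup_route()).
 */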
3127 struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
3128 struct dst_entry *dst_orig,
3129 const struct flowi *fl,
3130 const struct sock *sk,
3131 int flags, u32 if_id)
3132 {
3133 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3134 struct xfrm_dst *xdst;
3135 struct dst_entry *dst, *route;
3136 u16 family = dst_orig->ops->family;
3137 u8 dir = XFRM_POLICY_OUT;
3138 int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
3139
3140 dst = NULL;
3141 xdst = NULL;
3142 route = NULL;
3143
3144 sk = sk_const_to_full_sk(sk);
3145 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
3146 num_pols = 1;
3147 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
3148 if_id);
3149 err = xfrm_expand_policies(fl, family, pols,
3150 &num_pols, &num_xfrms);
3151 if (err < 0)
3152 goto dropdst;
3153
3154 if (num_pols) {
3155 if (num_xfrms <= 0) {
3156 drop_pols = num_pols;
3157 goto no_transform;
3158 }
3159
3160 xdst = xfrm_resolve_and_create_bundle(
3161 pols, num_pols, fl,
3162 family, dst_orig);
3163
3164 if (IS_ERR(xdst)) {
3165 xfrm_pols_put(pols, num_pols);
3166 err = PTR_ERR(xdst);
3167 if (err == -EREMOTE)
3168 goto nopol;
3169
3170 goto dropdst;
3171 } else if (xdst == NULL) {
3172 num_xfrms = 0;
3173 drop_pols = num_pols;
3174 goto no_transform;
3175 }
3176
3177 route = xdst->route;
3178 }
3179 }
3180
3181 if (xdst == NULL) {
3182 struct xfrm_flo xflo;
3183
3184 xflo.dst_orig = dst_orig;
3185 xflo.flags = flags;
3186
3187 /* To accelerate a bit... */
3188 if (!if_id && ((dst_orig->flags & DST_NOXFRM) ||
3189 !net->xfrm.policy_count[XFRM_POLICY_OUT]))
3190 goto nopol;
3191
3192 xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
3193 if (xdst == NULL)
3194 goto nopol;
3195 if (IS_ERR(xdst)) {
3196 err = PTR_ERR(xdst);
3197 goto dropdst;
3198 }
3199
3200 num_pols = xdst->num_pols;
3201 num_xfrms = xdst->num_xfrms;
3202 memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
3203 route = xdst->route;
3204 }
3205
3206 dst = &xdst->u.dst;
3207 if (route == NULL && num_xfrms > 0) {
3208 /* The only case when xfrm_bundle_lookup() returns a
3209 * bundle with null route, is when the template could
3210 * not be resolved. It means policies are there, but
3211 * bundle could not be created, since we don't yet
3212 * have the xfrm_state's. We need to wait for KM to
3213 * negotiate new SA's or bail out with error.*/
3214 if (net->xfrm.sysctl_larval_drop) {
3215 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3216 err = -EREMOTE;
3217 goto error;
3218 }
3219
3220 err = -EAGAIN;
3221
3222 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3223 goto error;
3224 }
3225
3226 no_transform:
3227 if (num_pols == 0)
3228 goto nopol;
3229
3230 if ((flags & XFRM_LOOKUP_ICMP) &&
3231 !(pols[0]->flags & XFRM_POLICY_ICMP)) {
3232 err = -ENOENT;
3233 goto error;
3234 }
3235
3236 for (i = 0; i < num_pols; i++)
3237 WRITE_ONCE(pols[i]->curlft.use_time, ktime_get_real_seconds());
3238
3239 if (num_xfrms < 0) {
3240 /* Prohibit the flow */
3241 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
3242 err = -EPERM;
3243 goto error;
3244 } else if (num_xfrms > 0) {
3245 /* Flow transformed */
3246 dst_release(dst_orig);
3247 } else {
3248 /* Flow passes untransformed */
3249 dst_release(dst);
3250 dst = dst_orig;
3251 }
3252 ok:
3253 xfrm_pols_put(pols, drop_pols);
3254 if (dst && dst->xfrm &&
3255 dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
3256 dst->flags |= DST_XFRM_TUNNEL;
3257 return dst;
3258
3259 nopol:
3260 if ((!dst_orig->dev || !(dst_orig->dev->flags & IFF_LOOPBACK)) &&
3261 net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
3262 err = -EPERM;
3263 goto error;
3264 }
3265 if (!(flags & XFRM_LOOKUP_ICMP)) {
3266 dst = dst_orig;
3267 goto ok;
3268 }
3269 err = -ENOENT;
3270 error:
3271 dst_release(dst);
3272 dropdst:
3273 if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
3274 dst_release(dst_orig);
3275 xfrm_pols_put(pols, drop_pols);
3276 return ERR_PTR(err);
3277 }
3278 EXPORT_SYMBOL(xfrm_lookup_with_ifid);
3279
3280 /* Main function: finds/creates a bundle for given flow.
3281 *
3282 * At the moment we eat a raw IP route. Mostly to speed up lookups
3283 * on interfaces with disabled IPsec.
3284 */
3285 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
3286 const struct flowi *fl, const struct sock *sk,
3287 int flags)
3288 {
3289 return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
3290 }
3291 EXPORT_SYMBOL(xfrm_lookup);
3292
3293 /* Callers of xfrm_lookup_route() must ensure a call to dst_output().
3294 * Otherwise we may send out blackholed packets.
3295 */
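/* A rough sketch of the expected calling pattern (illustrative only,
 * not copied from a specific caller):
 *
 *	dst = xfrm_lookup_route(net, dst_orig, &fl, sk, 0);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);
 *	skb_dst_set(skb, dst);
 *	...
 *	dst_output(net, sk, skb);	// may hit the blackhole route
 */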
3296 struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
3297 const struct flowi *fl,
3298 const struct sock *sk, int flags)
3299 {
3300 struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
3301 flags | XFRM_LOOKUP_QUEUE |
3302 XFRM_LOOKUP_KEEP_DST_REF);
3303
3304 if (PTR_ERR(dst) == -EREMOTE)
3305 return make_blackhole(net, dst_orig->ops->family, dst_orig);
3306
3307 if (IS_ERR(dst))
3308 dst_release(dst_orig);
3309
3310 return dst;
3311 }
3312 EXPORT_SYMBOL(xfrm_lookup_route);
3313
3314 static inline int
3315 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
3316 {
3317 struct sec_path *sp = skb_sec_path(skb);
3318 struct xfrm_state *x;
3319
3320 if (!sp || idx < 0 || idx >= sp->len)
3321 return 0;
3322 x = sp->xvec[idx];
3323 if (!x->type->reject)
3324 return 0;
3325 return x->type->reject(x, skb, fl);
3326 }
3327
3328 /* When skb is transformed back to its "native" form, we have to
3329 * check policy restrictions. At the moment we make this in maximally
3330 * stupid way. Shame on me. :-) Of course, connected sockets must
3331 * have policy cached at them.
3332 */
3333
3334 static inline int
3335 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
3336 unsigned short family, u32 if_id)
3337 {
3338 if (xfrm_state_kern(x))
3339 return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
3340 return x->id.proto == tmpl->id.proto &&
3341 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
3342 (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
3343 x->props.mode == tmpl->mode &&
3344 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
3345 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
3346 !(x->props.mode != XFRM_MODE_TRANSPORT &&
3347 xfrm_state_addr_cmp(tmpl, x, family)) &&
3348 (if_id == 0 || if_id == x->if_id);
3349 }
3350
3351 /*
3352  * 0 or a positive value is returned when validation succeeds (either a
3353  * bypass because of an optional transport-mode template, or the index just
3354  * past the matched secpath state).
3355  * -1 is returned when no matching template is found.
3356  * Otherwise "-2 - errored_index" is returned.
3357 */
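/* __xfrm_policy_check() walks the policy templates in reverse order and
 * feeds each returned index back in as 'start' for the next template,
 * so after a successful pass 'k' points just past the last secpath
 * entry that was matched.
 */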
3358 static inline int
3359 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
3360 unsigned short family, u32 if_id)
3361 {
3362 int idx = start;
3363
3364 if (tmpl->optional) {
3365 if (tmpl->mode == XFRM_MODE_TRANSPORT)
3366 return start;
3367 } else
3368 start = -1;
3369 for (; idx < sp->len; idx++) {
3370 if (xfrm_state_ok(tmpl, sp->xvec[idx], family, if_id))
3371 return ++idx;
3372 if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
3373 if (idx < sp->verified_cnt) {
3374 /* Secpath entry previously verified, consider optional and
3375 * continue searching
3376 */
3377 continue;
3378 }
3379
3380 if (start == -1)
3381 start = -2-idx;
3382 break;
3383 }
3384 }
3385 return start;
3386 }
3387
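/* Build a flowi from pre-dissected flow keys. With 'reverse' set the
 * source/destination addresses and ports are swapped, which is what the
 * reverse-decode path uses, e.g. when matching the inner flow of an
 * ICMP error against a policy selector.
 */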
3388 static void
3389 decode_session4(const struct xfrm_flow_keys *flkeys, struct flowi *fl, bool reverse)
3390 {
3391 struct flowi4 *fl4 = &fl->u.ip4;
3392
3393 memset(fl4, 0, sizeof(struct flowi4));
3394
3395 if (reverse) {
3396 fl4->saddr = flkeys->addrs.ipv4.dst;
3397 fl4->daddr = flkeys->addrs.ipv4.src;
3398 fl4->fl4_sport = flkeys->ports.dst;
3399 fl4->fl4_dport = flkeys->ports.src;
3400 } else {
3401 fl4->saddr = flkeys->addrs.ipv4.src;
3402 fl4->daddr = flkeys->addrs.ipv4.dst;
3403 fl4->fl4_sport = flkeys->ports.src;
3404 fl4->fl4_dport = flkeys->ports.dst;
3405 }
3406
3407 switch (flkeys->basic.ip_proto) {
3408 case IPPROTO_GRE:
3409 fl4->fl4_gre_key = flkeys->gre.keyid;
3410 break;
3411 case IPPROTO_ICMP:
3412 fl4->fl4_icmp_type = flkeys->icmp.type;
3413 fl4->fl4_icmp_code = flkeys->icmp.code;
3414 break;
3415 }
3416
3417 fl4->flowi4_proto = flkeys->basic.ip_proto;
3418 fl4->flowi4_tos = flkeys->ip.tos & ~INET_ECN_MASK;
3419 }
3420
3421 #if IS_ENABLED(CONFIG_IPV6)
3422 static void
3423 decode_session6(const struct xfrm_flow_keys *flkeys, struct flowi *fl, bool reverse)
3424 {
3425 struct flowi6 *fl6 = &fl->u.ip6;
3426
3427 memset(fl6, 0, sizeof(struct flowi6));
3428
3429 if (reverse) {
3430 fl6->saddr = flkeys->addrs.ipv6.dst;
3431 fl6->daddr = flkeys->addrs.ipv6.src;
3432 fl6->fl6_sport = flkeys->ports.dst;
3433 fl6->fl6_dport = flkeys->ports.src;
3434 } else {
3435 fl6->saddr = flkeys->addrs.ipv6.src;
3436 fl6->daddr = flkeys->addrs.ipv6.dst;
3437 fl6->fl6_sport = flkeys->ports.src;
3438 fl6->fl6_dport = flkeys->ports.dst;
3439 }
3440
3441 switch (flkeys->basic.ip_proto) {
3442 case IPPROTO_GRE:
3443 fl6->fl6_gre_key = flkeys->gre.keyid;
3444 break;
3445 case IPPROTO_ICMPV6:
3446 fl6->fl6_icmp_type = flkeys->icmp.type;
3447 fl6->fl6_icmp_code = flkeys->icmp.code;
3448 break;
3449 }
3450
3451 fl6->flowi6_proto = flkeys->basic.ip_proto;
3452 }
3453 #endif
3454
3455 int __xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl,
3456 unsigned int family, int reverse)
3457 {
3458 struct xfrm_flow_keys flkeys;
3459
3460 memset(&flkeys, 0, sizeof(flkeys));
3461 __skb_flow_dissect(net, skb, &xfrm_session_dissector, &flkeys,
3462 NULL, 0, 0, 0, FLOW_DISSECTOR_F_STOP_AT_ENCAP);
3463
3464 switch (family) {
3465 case AF_INET:
3466 decode_session4(&flkeys, fl, reverse);
3467 break;
3468 #if IS_ENABLED(CONFIG_IPV6)
3469 case AF_INET6:
3470 decode_session6(&flkeys, fl, reverse);
3471 break;
3472 #endif
3473 default:
3474 return -EAFNOSUPPORT;
3475 }
3476
3477 fl->flowi_mark = skb->mark;
3478 if (reverse) {
3479 fl->flowi_oif = skb->skb_iif;
3480 } else {
3481 int oif = 0;
3482
3483 if (skb_dst(skb) && skb_dst(skb)->dev)
3484 oif = skb_dst(skb)->dev->ifindex;
3485
3486 fl->flowi_oif = oif;
3487 }
3488
3489 return security_xfrm_decode_session(skb, &fl->flowi_secid);
3490 }
3491 EXPORT_SYMBOL(__xfrm_decode_session);
3492
3493 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
3494 {
3495 for (; k < sp->len; k++) {
3496 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
3497 *idxp = k;
3498 return 1;
3499 }
3500 }
3501
3502 return 0;
3503 }
3504
3505 static bool icmp_err_packet(const struct flowi *fl, unsigned short family)
3506 {
3507 const struct flowi4 *fl4 = &fl->u.ip4;
3508
3509 if (family == AF_INET &&
3510 fl4->flowi4_proto == IPPROTO_ICMP &&
3511 (fl4->fl4_icmp_type == ICMP_DEST_UNREACH ||
3512 fl4->fl4_icmp_type == ICMP_TIME_EXCEEDED))
3513 return true;
3514
3515 #if IS_ENABLED(CONFIG_IPV6)
3516 if (family == AF_INET6) {
3517 const struct flowi6 *fl6 = &fl->u.ip6;
3518
3519 if (fl6->flowi6_proto == IPPROTO_ICMPV6 &&
3520 (fl6->fl6_icmp_type == ICMPV6_DEST_UNREACH ||
3521 fl6->fl6_icmp_type == ICMPV6_PKT_TOOBIG ||
3522 fl6->fl6_icmp_type == ICMPV6_TIME_EXCEED))
3523 return true;
3524 }
3525 #endif
3526 return false;
3527 }
3528
3529 static bool xfrm_icmp_flow_decode(struct sk_buff *skb, unsigned short family,
3530 const struct flowi *fl, struct flowi *fl1)
3531 {
3532 bool ret = true;
3533 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
3534 int hl = family == AF_INET ? (sizeof(struct iphdr) + sizeof(struct icmphdr)) :
3535 (sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr));
3536
3537 if (!newskb)
3538 return true;
3539
3540 if (!pskb_pull(newskb, hl))
3541 goto out;
3542
3543 skb_reset_network_header(newskb);
3544
3545 if (xfrm_decode_session_reverse(dev_net(skb->dev), newskb, fl1, family) < 0)
3546 goto out;
3547
3548 fl1->flowi_oif = fl->flowi_oif;
3549 fl1->flowi_mark = fl->flowi_mark;
3550 fl1->flowi_tos = fl->flowi_tos;
3551 nf_nat_decode_session(newskb, fl1, family);
3552 ret = false;
3553
3554 out:
3555 consume_skb(newskb);
3556 return ret;
3557 }
3558
3559 static bool xfrm_selector_inner_icmp_match(struct sk_buff *skb, unsigned short family,
3560 const struct xfrm_selector *sel,
3561 const struct flowi *fl)
3562 {
3563 bool ret = false;
3564
3565 if (icmp_err_packet(fl, family)) {
3566 struct flowi fl1;
3567
3568 if (xfrm_icmp_flow_decode(skb, family, fl, &fl1))
3569 return ret;
3570
3571 ret = xfrm_selector_match(sel, &fl1, family);
3572 }
3573
3574 return ret;
3575 }
3576
3577 static inline struct
3578 xfrm_policy *xfrm_in_fwd_icmp(struct sk_buff *skb,
3579 const struct flowi *fl, unsigned short family,
3580 u32 if_id)
3581 {
3582 struct xfrm_policy *pol = NULL;
3583
3584 if (icmp_err_packet(fl, family)) {
3585 struct flowi fl1;
3586 struct net *net = dev_net(skb->dev);
3587
3588 if (xfrm_icmp_flow_decode(skb, family, fl, &fl1))
3589 return pol;
3590
3591 pol = xfrm_policy_lookup(net, &fl1, family, XFRM_POLICY_FWD, if_id);
3592 if (IS_ERR(pol))
3593 pol = NULL;
3594 }
3595
3596 return pol;
3597 }
3598
3599 static inline struct
3600 dst_entry *xfrm_out_fwd_icmp(struct sk_buff *skb, struct flowi *fl,
3601 unsigned short family, struct dst_entry *dst)
3602 {
3603 if (icmp_err_packet(fl, family)) {
3604 struct net *net = dev_net(skb->dev);
3605 struct dst_entry *dst2;
3606 struct flowi fl1;
3607
3608 if (xfrm_icmp_flow_decode(skb, family, fl, &fl1))
3609 return dst;
3610
3611 dst_hold(dst);
3612
3613 dst2 = xfrm_lookup(net, dst, &fl1, NULL, (XFRM_LOOKUP_QUEUE | XFRM_LOOKUP_ICMP));
3614
3615 if (IS_ERR(dst2))
3616 return dst;
3617
3618 if (dst2->xfrm) {
3619 dst_release(dst);
3620 dst = dst2;
3621 } else {
3622 dst_release(dst2);
3623 }
3624 }
3625
3626 return dst;
3627 }
3628
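/* Inbound/forward policy check: returns 1 if the packet is allowed,
 * 0 if it must be dropped. The flow is decoded from the skb, the states
 * already applied (secpath) are checked against their selectors, then
 * the applicable socket or global policies are looked up and, for ALLOW
 * policies, their templates are verified against the secpath via
 * xfrm_policy_ok().
 */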
3629 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
3630 unsigned short family)
3631 {
3632 struct net *net = dev_net(skb->dev);
3633 struct xfrm_policy *pol;
3634 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3635 int npols = 0;
3636 int xfrm_nr;
3637 int pi;
3638 int reverse;
3639 struct flowi fl;
3640 int xerr_idx = -1;
3641 const struct xfrm_if_cb *ifcb;
3642 struct sec_path *sp;
3643 u32 if_id = 0;
3644
3645 rcu_read_lock();
3646 ifcb = xfrm_if_get_cb();
3647
3648 if (ifcb) {
3649 struct xfrm_if_decode_session_result r;
3650
3651 if (ifcb->decode_session(skb, family, &r)) {
3652 if_id = r.if_id;
3653 net = r.net;
3654 }
3655 }
3656 rcu_read_unlock();
3657
3658 reverse = dir & ~XFRM_POLICY_MASK;
3659 dir &= XFRM_POLICY_MASK;
3660
3661 if (__xfrm_decode_session(net, skb, &fl, family, reverse) < 0) {
3662 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
3663 return 0;
3664 }
3665
3666 nf_nat_decode_session(skb, &fl, family);
3667
3668 /* First, check used SA against their selectors. */
3669 sp = skb_sec_path(skb);
3670 if (sp) {
3671 int i;
3672
3673 for (i = sp->len - 1; i >= 0; i--) {
3674 struct xfrm_state *x = sp->xvec[i];
3675 int ret = 0;
3676
3677 if (!xfrm_selector_match(&x->sel, &fl, family)) {
3678 ret = 1;
3679 if (x->props.flags & XFRM_STATE_ICMP &&
3680 xfrm_selector_inner_icmp_match(skb, family, &x->sel, &fl))
3681 ret = 0;
3682 if (ret) {
3683 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
3684 return 0;
3685 }
3686 }
3687 }
3688 }
3689
3690 pol = NULL;
3691 sk = sk_to_full_sk(sk);
3692 if (sk && sk->sk_policy[dir]) {
3693 pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id);
3694 if (IS_ERR(pol)) {
3695 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3696 return 0;
3697 }
3698 }
3699
3700 if (!pol)
3701 pol = xfrm_policy_lookup(net, &fl, family, dir, if_id);
3702
3703 if (IS_ERR(pol)) {
3704 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3705 return 0;
3706 }
3707
3708 if (!pol && dir == XFRM_POLICY_FWD)
3709 pol = xfrm_in_fwd_icmp(skb, &fl, family, if_id);
3710
3711 if (!pol) {
3712 const bool is_crypto_offload = sp &&
3713 (xfrm_input_state(skb)->xso.type == XFRM_DEV_OFFLOAD_CRYPTO);
3714
3715 if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
3716 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3717 return 0;
3718 }
3719
3720 if (sp && secpath_has_nontransport(sp, 0, &xerr_idx) && !is_crypto_offload) {
3721 xfrm_secpath_reject(xerr_idx, skb, &fl);
3722 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
3723 return 0;
3724 }
3725 return 1;
3726 }
3727
3728 /* This lockless write can happen from different cpus. */
3729 WRITE_ONCE(pol->curlft.use_time, ktime_get_real_seconds());
3730
3731 pols[0] = pol;
3732 npols++;
3733 #ifdef CONFIG_XFRM_SUB_POLICY
3734 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
3735 pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
3736 &fl, family,
3737 XFRM_POLICY_IN, if_id);
3738 if (pols[1]) {
3739 if (IS_ERR(pols[1])) {
3740 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
3741 xfrm_pol_put(pols[0]);
3742 return 0;
3743 }
3744 /* This write can happen from different cpus. */
3745 WRITE_ONCE(pols[1]->curlft.use_time,
3746 ktime_get_real_seconds());
3747 npols++;
3748 }
3749 }
3750 #endif
3751
3752 if (pol->action == XFRM_POLICY_ALLOW) {
3753 static struct sec_path dummy;
3754 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
3755 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
3756 struct xfrm_tmpl **tpp = tp;
3757 int ti = 0;
3758 int i, k;
3759
3760 sp = skb_sec_path(skb);
3761 if (!sp)
3762 sp = &dummy;
3763
3764 for (pi = 0; pi < npols; pi++) {
3765 if (pols[pi] != pol &&
3766 pols[pi]->action != XFRM_POLICY_ALLOW) {
3767 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3768 goto reject;
3769 }
3770 if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
3771 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
3772 goto reject_error;
3773 }
3774 for (i = 0; i < pols[pi]->xfrm_nr; i++)
3775 tpp[ti++] = &pols[pi]->xfrm_vec[i];
3776 }
3777 xfrm_nr = ti;
3778
3779 if (npols > 1) {
3780 xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
3781 tpp = stp;
3782 }
3783
3784 /* For each tunnel xfrm, find the first matching tmpl.
3785 * For each tmpl before that, find corresponding xfrm.
3786 * Order is _important_. Later we will implement
3787 * some barriers, but at the moment barriers
3788 * are implied between each two transformations.
3789 * Upon success, marks secpath entries as having been
3790 * verified to allow them to be skipped in future policy
3791 * checks (e.g. nested tunnels).
3792 */
3793 for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
3794 k = xfrm_policy_ok(tpp[i], sp, k, family, if_id);
3795 if (k < 0) {
3796 if (k < -1)
3797 /* "-2 - errored_index" returned */
3798 xerr_idx = -(2+k);
3799 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3800 goto reject;
3801 }
3802 }
3803
3804 if (secpath_has_nontransport(sp, k, &xerr_idx)) {
3805 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
3806 goto reject;
3807 }
3808
3809 xfrm_pols_put(pols, npols);
3810 sp->verified_cnt = k;
3811
3812 return 1;
3813 }
3814 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
3815
3816 reject:
3817 xfrm_secpath_reject(xerr_idx, skb, &fl);
3818 reject_error:
3819 xfrm_pols_put(pols, npols);
3820 return 0;
3821 }
3822 EXPORT_SYMBOL(__xfrm_policy_check);
3823
3824 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
3825 {
3826 struct net *net = dev_net(skb->dev);
3827 struct flowi fl;
3828 struct dst_entry *dst;
3829 int res = 1;
3830
3831 if (xfrm_decode_session(net, skb, &fl, family) < 0) {
3832 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3833 return 0;
3834 }
3835
3836 skb_dst_force(skb);
3837 if (!skb_dst(skb)) {
3838 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
3839 return 0;
3840 }
3841
3842 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
3843 if (IS_ERR(dst)) {
3844 res = 0;
3845 dst = NULL;
3846 }
3847
3848 if (dst && !dst->xfrm)
3849 dst = xfrm_out_fwd_icmp(skb, &fl, family, dst);
3850
3851 skb_dst_set(skb, dst);
3852 return res;
3853 }
3854 EXPORT_SYMBOL(__xfrm_route_forward);
3855
3856 /* Optimize later using cookies and generation ids. */
3857
3858 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
3859 {
3860 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
3861 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
3862 * get validated by dst_ops->check on every use. We do this
3863 * because when a normal route referenced by an XFRM dst is
3864 * obsoleted we do not go looking around for all parent
3865 * referencing XFRM dsts so that we can invalidate them. It
3866 * is just too much work. Instead we make the checks here on
3867 * every use. For example:
3868 *
3869 * XFRM dst A --> IPv4 dst X
3870 *
3871 * X is the "xdst->route" of A (X is also the "dst->path" of A
3872 * in this example). If X is marked obsolete, "A" will not
3873 * notice. That's what we are validating here via the
3874 * stale_bundle() check.
3875 *
3876 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
3877 * be marked on it.
3878 * This will force stale_bundle() to fail on any xdst bundle with
3879 * this dst linked in it.
3880 */
3881 if (dst->obsolete < 0 && !stale_bundle(dst))
3882 return dst;
3883
3884 return NULL;
3885 }
3886
3887 static int stale_bundle(struct dst_entry *dst)
3888 {
3889 return !xfrm_bundle_ok((struct xfrm_dst *)dst);
3890 }
3891
3892 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
3893 {
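	/* The device is going away: re-point every child xfrm dst that still
	 * references @dev at blackhole_netdev, so the bundle keeps a valid
	 * (but dead-end) device reference until it is released.
	 */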
3894 while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
3895 dst->dev = blackhole_netdev;
3896 dev_hold(dst->dev);
3897 dev_put(dev);
3898 }
3899 }
3900 EXPORT_SYMBOL(xfrm_dst_ifdown);
3901
3902 static void xfrm_link_failure(struct sk_buff *skb)
3903 {
3904 /* Impossible. Such a dst must be popped before it reaches the point of failure. */
3905 }
3906
3907 static void xfrm_negative_advice(struct sock *sk, struct dst_entry *dst)
3908 {
3909 if (dst->obsolete)
3910 sk_dst_reset(sk);
3911 }
3912
3913 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
3914 {
3915 while (nr--) {
3916 struct xfrm_dst *xdst = bundle[nr];
3917 u32 pmtu, route_mtu_cached;
3918 struct dst_entry *dst;
3919
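		/* For each level of the bundle: the usable MTU is the child's MTU
		 * reduced by this state's overhead (xfrm_state_mtu()), further
		 * capped by the MTU of the route carrying the encapsulated packets.
		 */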
3920 dst = &xdst->u.dst;
3921 pmtu = dst_mtu(xfrm_dst_child(dst));
3922 xdst->child_mtu_cached = pmtu;
3923
3924 pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
3925
3926 route_mtu_cached = dst_mtu(xdst->route);
3927 xdst->route_mtu_cached = route_mtu_cached;
3928
3929 if (pmtu > route_mtu_cached)
3930 pmtu = route_mtu_cached;
3931
3932 dst_metric_set(dst, RTAX_MTU, pmtu);
3933 }
3934 }
3935
3936 /* Check that the bundle accepts the flow and its components are
3937 * still valid.
3938 */
3939
3940 static int xfrm_bundle_ok(struct xfrm_dst *first)
3941 {
3942 struct xfrm_dst *bundle[XFRM_MAX_DEPTH];
3943 struct dst_entry *dst = &first->u.dst;
3944 struct xfrm_dst *xdst;
3945 int start_from, nr;
3946 u32 mtu;
3947
3948 if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) ||
3949 (dst->dev && !netif_running(dst->dev)))
3950 return 0;
3951
3952 if (dst->flags & DST_XFRM_QUEUE)
3953 return 1;
3954
3955 start_from = nr = 0;
3956 do {
3957 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
3958
3959 if (dst->xfrm->km.state != XFRM_STATE_VALID)
3960 return 0;
3961 if (xdst->xfrm_genid != dst->xfrm->genid)
3962 return 0;
3963 if (xdst->num_pols > 0 &&
3964 xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
3965 return 0;
3966
3967 bundle[nr++] = xdst;
3968
3969 mtu = dst_mtu(xfrm_dst_child(dst));
3970 if (xdst->child_mtu_cached != mtu) {
3971 start_from = nr;
3972 xdst->child_mtu_cached = mtu;
3973 }
3974
3975 if (!dst_check(xdst->route, xdst->route_cookie))
3976 return 0;
3977 mtu = dst_mtu(xdst->route);
3978 if (xdst->route_mtu_cached != mtu) {
3979 start_from = nr;
3980 xdst->route_mtu_cached = mtu;
3981 }
3982
3983 dst = xfrm_dst_child(dst);
3984 } while (dst->xfrm);
3985
3986 if (likely(!start_from))
3987 return 1;
3988
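	/* Some cached MTU changed while walking the chain: start_from points one
	 * past the deepest level that changed, so re-derive the metrics from the
	 * inside out, much like xfrm_init_pmtu() does.
	 */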
3989 xdst = bundle[start_from - 1];
3990 mtu = xdst->child_mtu_cached;
3991 while (start_from--) {
3992 dst = &xdst->u.dst;
3993
3994 mtu = xfrm_state_mtu(dst->xfrm, mtu);
3995 if (mtu > xdst->route_mtu_cached)
3996 mtu = xdst->route_mtu_cached;
3997 dst_metric_set(dst, RTAX_MTU, mtu);
3998 if (!start_from)
3999 break;
4000
4001 xdst = bundle[start_from - 1];
4002 xdst->child_mtu_cached = mtu;
4003 }
4004
4005 return 1;
4006 }
4007
4008 static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
4009 {
4010 return dst_metric_advmss(xfrm_dst_path(dst));
4011 }
4012
4013 static unsigned int xfrm_mtu(const struct dst_entry *dst)
4014 {
4015 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
4016
4017 return mtu ? : dst_mtu(xfrm_dst_path(dst));
4018 }
4019
4020 static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst,
4021 const void *daddr)
4022 {
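	/* Walk towards the path dst, skipping transport-mode states (they do not
	 * change the nexthop) and substituting the tunnel endpoint (or the MIPv6
	 * care-of address when XFRM_TYPE_REMOTE_COADDR is set) as the address to
	 * resolve.
	 */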
4023 while (dst->xfrm) {
4024 const struct xfrm_state *xfrm = dst->xfrm;
4025
4026 dst = xfrm_dst_child(dst);
4027
4028 if (xfrm->props.mode == XFRM_MODE_TRANSPORT)
4029 continue;
4030 if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR)
4031 daddr = xfrm->coaddr;
4032 else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR))
4033 daddr = &xfrm->id.daddr;
4034 }
4035 return daddr;
4036 }
4037
4038 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
4039 struct sk_buff *skb,
4040 const void *daddr)
4041 {
4042 const struct dst_entry *path = xfrm_dst_path(dst);
4043
4044 if (!skb)
4045 daddr = xfrm_get_dst_nexthop(dst, daddr);
4046 return path->ops->neigh_lookup(path, skb, daddr);
4047 }
4048
4049 static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr)
4050 {
4051 const struct dst_entry *path = xfrm_dst_path(dst);
4052
4053 daddr = xfrm_get_dst_nexthop(dst, daddr);
4054 path->ops->confirm_neigh(path, daddr);
4055 }
4056
4057 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family)
4058 {
4059 int err = 0;
4060
4061 if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
4062 return -EAFNOSUPPORT;
4063
4064 spin_lock(&xfrm_policy_afinfo_lock);
4065 if (unlikely(xfrm_policy_afinfo[family] != NULL))
4066 err = -EEXIST;
4067 else {
4068 struct dst_ops *dst_ops = afinfo->dst_ops;
4069 if (likely(dst_ops->kmem_cachep == NULL))
4070 dst_ops->kmem_cachep = xfrm_dst_cache;
4071 if (likely(dst_ops->check == NULL))
4072 dst_ops->check = xfrm_dst_check;
4073 if (likely(dst_ops->default_advmss == NULL))
4074 dst_ops->default_advmss = xfrm_default_advmss;
4075 if (likely(dst_ops->mtu == NULL))
4076 dst_ops->mtu = xfrm_mtu;
4077 if (likely(dst_ops->negative_advice == NULL))
4078 dst_ops->negative_advice = xfrm_negative_advice;
4079 if (likely(dst_ops->link_failure == NULL))
4080 dst_ops->link_failure = xfrm_link_failure;
4081 if (likely(dst_ops->neigh_lookup == NULL))
4082 dst_ops->neigh_lookup = xfrm_neigh_lookup;
4083 if (likely(!dst_ops->confirm_neigh))
4084 dst_ops->confirm_neigh = xfrm_confirm_neigh;
4085 rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo);
4086 }
4087 spin_unlock(&xfrm_policy_afinfo_lock);
4088
4089 return err;
4090 }
4091 EXPORT_SYMBOL(xfrm_policy_register_afinfo);
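/* Typical usage (sketch, not part of this file): an address-family module such
 * as xfrm4_policy.c fills in a struct xfrm_policy_afinfo with its dst_ops and
 * calls xfrm_policy_register_afinfo(&xfrm4_policy_afinfo, AF_INET) at init
 * time; any dst_ops hooks it leaves NULL are defaulted to the xfrm_* helpers
 * above.
 */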
4092
4093 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo)
4094 {
4095 struct dst_ops *dst_ops = afinfo->dst_ops;
4096 int i;
4097
4098 for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) {
4099 if (xfrm_policy_afinfo[i] != afinfo)
4100 continue;
4101 RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL);
4102 break;
4103 }
4104
4105 synchronize_rcu();
4106
4107 dst_ops->kmem_cachep = NULL;
4108 dst_ops->check = NULL;
4109 dst_ops->negative_advice = NULL;
4110 dst_ops->link_failure = NULL;
4111 }
4112 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
4113
4114 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb)
4115 {
4116 spin_lock(&xfrm_if_cb_lock);
4117 rcu_assign_pointer(xfrm_if_cb, ifcb);
4118 spin_unlock(&xfrm_if_cb_lock);
4119 }
4120 EXPORT_SYMBOL(xfrm_if_register_cb);
4121
4122 void xfrm_if_unregister_cb(void)
4123 {
4124 RCU_INIT_POINTER(xfrm_if_cb, NULL);
4125 synchronize_rcu();
4126 }
4127 EXPORT_SYMBOL(xfrm_if_unregister_cb);
4128
4129 #ifdef CONFIG_XFRM_STATISTICS
4130 static int __net_init xfrm_statistics_init(struct net *net)
4131 {
4132 int rv;
4133 net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
4134 if (!net->mib.xfrm_statistics)
4135 return -ENOMEM;
4136 rv = xfrm_proc_init(net);
4137 if (rv < 0)
4138 free_percpu(net->mib.xfrm_statistics);
4139 return rv;
4140 }
4141
4142 static void xfrm_statistics_fini(struct net *net)
4143 {
4144 xfrm_proc_fini(net);
4145 free_percpu(net->mib.xfrm_statistics);
4146 }
4147 #else
4148 static int __net_init xfrm_statistics_init(struct net *net)
4149 {
4150 return 0;
4151 }
4152
4153 static void xfrm_statistics_fini(struct net *net)
4154 {
4155 }
4156 #endif
4157
4158 static int __net_init xfrm_policy_init(struct net *net)
4159 {
4160 unsigned int hmask, sz;
4161 int dir, err;
4162
4163 if (net_eq(net, &init_net)) {
4164 xfrm_dst_cache = KMEM_CACHE(xfrm_dst, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
4165 err = rhashtable_init(&xfrm_policy_inexact_table,
4166 &xfrm_pol_inexact_params);
4167 BUG_ON(err);
4168 }
4169
4170 hmask = 8 - 1;
4171 sz = (hmask+1) * sizeof(struct hlist_head);
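	/* The per-direction tables below start out tiny (hmask 7, i.e. 8 buckets)
	 * and are grown later by the policy_hash_work / xfrm_hash_resize() worker
	 * as policies are inserted.
	 */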
4172
4173 net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
4174 if (!net->xfrm.policy_byidx)
4175 goto out_byidx;
4176 net->xfrm.policy_idx_hmask = hmask;
4177
4178 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4179 struct xfrm_policy_hash *htab;
4180
4181 net->xfrm.policy_count[dir] = 0;
4182 net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
4183 INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
4184
4185 htab = &net->xfrm.policy_bydst[dir];
4186 htab->table = xfrm_hash_alloc(sz);
4187 if (!htab->table)
4188 goto out_bydst;
4189 htab->hmask = hmask;
4190 htab->dbits4 = 32;
4191 htab->sbits4 = 32;
4192 htab->dbits6 = 128;
4193 htab->sbits6 = 128;
4194 }
4195 net->xfrm.policy_hthresh.lbits4 = 32;
4196 net->xfrm.policy_hthresh.rbits4 = 32;
4197 net->xfrm.policy_hthresh.lbits6 = 128;
4198 net->xfrm.policy_hthresh.rbits6 = 128;
4199
4200 seqlock_init(&net->xfrm.policy_hthresh.lock);
4201
4202 INIT_LIST_HEAD(&net->xfrm.policy_all);
4203 INIT_LIST_HEAD(&net->xfrm.inexact_bins);
4204 INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
4205 INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
4206 return 0;
4207
4208 out_bydst:
4209 for (dir--; dir >= 0; dir--) {
4210 struct xfrm_policy_hash *htab;
4211
4212 htab = &net->xfrm.policy_bydst[dir];
4213 xfrm_hash_free(htab->table, sz);
4214 }
4215 xfrm_hash_free(net->xfrm.policy_byidx, sz);
4216 out_byidx:
4217 return -ENOMEM;
4218 }
4219
4220 static void xfrm_policy_fini(struct net *net)
4221 {
4222 struct xfrm_pol_inexact_bin *b, *t;
4223 unsigned int sz;
4224 int dir;
4225
4226 flush_work(&net->xfrm.policy_hash_work);
4227 #ifdef CONFIG_XFRM_SUB_POLICY
4228 xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
4229 #endif
4230 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);
4231
4232 WARN_ON(!list_empty(&net->xfrm.policy_all));
4233
4234 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
4235 struct xfrm_policy_hash *htab;
4236
4237 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));
4238
4239 htab = &net->xfrm.policy_bydst[dir];
4240 sz = (htab->hmask + 1) * sizeof(struct hlist_head);
4241 WARN_ON(!hlist_empty(htab->table));
4242 xfrm_hash_free(htab->table, sz);
4243 }
4244
4245 sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
4246 WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
4247 xfrm_hash_free(net->xfrm.policy_byidx, sz);
4248
4249 spin_lock_bh(&net->xfrm.xfrm_policy_lock);
4250 list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins)
4251 __xfrm_policy_inexact_prune_bin(b, true);
4252 spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
4253 }
4254
4255 static int __net_init xfrm_net_init(struct net *net)
4256 {
4257 int rv;
4258
4259 /* Initialize the per-net locks here */
4260 spin_lock_init(&net->xfrm.xfrm_state_lock);
4261 spin_lock_init(&net->xfrm.xfrm_policy_lock);
4262 seqcount_spinlock_init(&net->xfrm.xfrm_policy_hash_generation, &net->xfrm.xfrm_policy_lock);
4263 mutex_init(&net->xfrm.xfrm_cfg_mutex);
4264 net->xfrm.policy_default[XFRM_POLICY_IN] = XFRM_USERPOLICY_ACCEPT;
4265 net->xfrm.policy_default[XFRM_POLICY_FWD] = XFRM_USERPOLICY_ACCEPT;
4266 net->xfrm.policy_default[XFRM_POLICY_OUT] = XFRM_USERPOLICY_ACCEPT;
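	/* Default-allow when no policy matches. These per-direction defaults can
	 * later be switched to XFRM_USERPOLICY_BLOCK from userspace (via the
	 * XFRM_MSG_SETDEFAULT netlink command, an assumption about the interface).
	 */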
4267
4268 rv = xfrm_statistics_init(net);
4269 if (rv < 0)
4270 goto out_statistics;
4271 rv = xfrm_state_init(net);
4272 if (rv < 0)
4273 goto out_state;
4274 rv = xfrm_policy_init(net);
4275 if (rv < 0)
4276 goto out_policy;
4277 rv = xfrm_sysctl_init(net);
4278 if (rv < 0)
4279 goto out_sysctl;
4280
4281 rv = xfrm_nat_keepalive_net_init(net);
4282 if (rv < 0)
4283 goto out_nat_keepalive;
4284
4285 return 0;
4286
4287 out_nat_keepalive:
4288 xfrm_sysctl_fini(net);
4289 out_sysctl:
4290 xfrm_policy_fini(net);
4291 out_policy:
4292 xfrm_state_fini(net);
4293 out_state:
4294 xfrm_statistics_fini(net);
4295 out_statistics:
4296 return rv;
4297 }
4298
4299 static void __net_exit xfrm_net_exit(struct net *net)
4300 {
4301 xfrm_nat_keepalive_net_fini(net);
4302 xfrm_sysctl_fini(net);
4303 xfrm_policy_fini(net);
4304 xfrm_state_fini(net);
4305 xfrm_statistics_fini(net);
4306 }
4307
4308 static struct pernet_operations __net_initdata xfrm_net_ops = {
4309 .init = xfrm_net_init,
4310 .exit = xfrm_net_exit,
4311 };
4312
4313 static const struct flow_dissector_key xfrm_flow_dissector_keys[] = {
4314 {
4315 .key_id = FLOW_DISSECTOR_KEY_CONTROL,
4316 .offset = offsetof(struct xfrm_flow_keys, control),
4317 },
4318 {
4319 .key_id = FLOW_DISSECTOR_KEY_BASIC,
4320 .offset = offsetof(struct xfrm_flow_keys, basic),
4321 },
4322 {
4323 .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
4324 .offset = offsetof(struct xfrm_flow_keys, addrs.ipv4),
4325 },
4326 {
4327 .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
4328 .offset = offsetof(struct xfrm_flow_keys, addrs.ipv6),
4329 },
4330 {
4331 .key_id = FLOW_DISSECTOR_KEY_PORTS,
4332 .offset = offsetof(struct xfrm_flow_keys, ports),
4333 },
4334 {
4335 .key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
4336 .offset = offsetof(struct xfrm_flow_keys, gre),
4337 },
4338 {
4339 .key_id = FLOW_DISSECTOR_KEY_IP,
4340 .offset = offsetof(struct xfrm_flow_keys, ip),
4341 },
4342 {
4343 .key_id = FLOW_DISSECTOR_KEY_ICMP,
4344 .offset = offsetof(struct xfrm_flow_keys, icmp),
4345 },
4346 };
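/* These keys tell the flow dissector which header fields to extract into a
 * struct xfrm_flow_keys; the dissector is initialised from this table in
 * xfrm_init() below and is used by the session decode path when building a
 * flowi for policy lookup.
 */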
4347
4348 void __init xfrm_init(void)
4349 {
4350 skb_flow_dissector_init(&xfrm_session_dissector,
4351 xfrm_flow_dissector_keys,
4352 ARRAY_SIZE(xfrm_flow_dissector_keys));
4353
4354 register_pernet_subsys(&xfrm_net_ops);
4355 xfrm_dev_init();
4356 xfrm_input_init();
4357
4358 #ifdef CONFIG_XFRM_ESPINTCP
4359 espintcp_init();
4360 #endif
4361
4362 register_xfrm_state_bpf();
4363 xfrm_nat_keepalive_init(AF_INET);
4364 }
4365
4366 #ifdef CONFIG_AUDITSYSCALL
4367 static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
4368 struct audit_buffer *audit_buf)
4369 {
4370 struct xfrm_sec_ctx *ctx = xp->security;
4371 struct xfrm_selector *sel = &xp->selector;
4372
4373 if (ctx)
4374 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
4375 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
4376
4377 switch (sel->family) {
4378 case AF_INET:
4379 audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
4380 if (sel->prefixlen_s != 32)
4381 audit_log_format(audit_buf, " src_prefixlen=%d",
4382 sel->prefixlen_s);
4383 audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
4384 if (sel->prefixlen_d != 32)
4385 audit_log_format(audit_buf, " dst_prefixlen=%d",
4386 sel->prefixlen_d);
4387 break;
4388 case AF_INET6:
4389 audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
4390 if (sel->prefixlen_s != 128)
4391 audit_log_format(audit_buf, " src_prefixlen=%d",
4392 sel->prefixlen_s);
4393 audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
4394 if (sel->prefixlen_d != 128)
4395 audit_log_format(audit_buf, " dst_prefixlen=%d",
4396 sel->prefixlen_d);
4397 break;
4398 }
4399 }
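/* Example of the fields appended by the helper above (illustrative values):
 *   " src=10.0.0.0 src_prefixlen=24 dst=192.0.2.0 dst_prefixlen=24"
 * plus " sec_alg=... sec_doi=... sec_obj=..." when a security context is set.
 */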
4400
4401 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
4402 {
4403 struct audit_buffer *audit_buf;
4404
4405 audit_buf = xfrm_audit_start("SPD-add");
4406 if (audit_buf == NULL)
4407 return;
4408 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4409 audit_log_format(audit_buf, " res=%u", result);
4410 xfrm_audit_common_policyinfo(xp, audit_buf);
4411 audit_log_end(audit_buf);
4412 }
4413 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
4414
4415 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
4416 bool task_valid)
4417 {
4418 struct audit_buffer *audit_buf;
4419
4420 audit_buf = xfrm_audit_start("SPD-delete");
4421 if (audit_buf == NULL)
4422 return;
4423 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
4424 audit_log_format(audit_buf, " res=%u", result);
4425 xfrm_audit_common_policyinfo(xp, audit_buf);
4426 audit_log_end(audit_buf);
4427 }
4428 EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
4429 #endif
4430
4431 #ifdef CONFIG_XFRM_MIGRATE
4432 static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
4433 u8 dir, u8 type, struct net *net, u32 if_id)
4434 {
4435 struct xfrm_policy *pol;
4436 struct flowi fl;
4437
4438 memset(&fl, 0, sizeof(fl));
4439
4440 fl.flowi_proto = sel->proto;
4441
4442 switch (sel->family) {
4443 case AF_INET:
4444 fl.u.ip4.saddr = sel->saddr.a4;
4445 fl.u.ip4.daddr = sel->daddr.a4;
4446 if (sel->proto == IPSEC_ULPROTO_ANY)
4447 break;
4448 fl.u.flowi4_oif = sel->ifindex;
4449 fl.u.ip4.fl4_sport = sel->sport;
4450 fl.u.ip4.fl4_dport = sel->dport;
4451 break;
4452 case AF_INET6:
4453 fl.u.ip6.saddr = sel->saddr.in6;
4454 fl.u.ip6.daddr = sel->daddr.in6;
4455 if (sel->proto == IPSEC_ULPROTO_ANY)
4456 break;
4457 fl.u.flowi6_oif = sel->ifindex;
4458 fl.u.ip6.fl4_sport = sel->sport;
4459 fl.u.ip6.fl4_dport = sel->dport;
4460 break;
4461 default:
4462 return ERR_PTR(-EAFNOSUPPORT);
4463 }
4464
4465 rcu_read_lock();
4466
4467 pol = xfrm_policy_lookup_bytype(net, type, &fl, sel->family, dir, if_id);
4468 if (IS_ERR_OR_NULL(pol))
4469 goto out_unlock;
4470
4471 if (!xfrm_pol_hold_rcu(pol))
4472 pol = NULL;
4473 out_unlock:
4474 rcu_read_unlock();
4475 return pol;
4476 }
4477
4478 static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
4479 {
4480 int match = 0;
4481
4482 if (t->mode == m->mode && t->id.proto == m->proto &&
4483 (m->reqid == 0 || t->reqid == m->reqid)) {
4484 switch (t->mode) {
4485 case XFRM_MODE_TUNNEL:
4486 case XFRM_MODE_BEET:
4487 if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
4488 m->old_family) &&
4489 xfrm_addr_equal(&t->saddr, &m->old_saddr,
4490 m->old_family)) {
4491 match = 1;
4492 }
4493 break;
4494 case XFRM_MODE_TRANSPORT:
4495 /* in transport mode the template does not store
4496 any IP addresses, so we only compare mode and
4497 protocol */
4498 match = 1;
4499 break;
4500 default:
4501 break;
4502 }
4503 }
4504 return match;
4505 }
4506
4507 /* update endpoint address(es) of template(s) */
4508 static int xfrm_policy_migrate(struct xfrm_policy *pol,
4509 struct xfrm_migrate *m, int num_migrate,
4510 struct netlink_ext_ack *extack)
4511 {
4512 struct xfrm_migrate *mp;
4513 int i, j, n = 0;
4514
4515 write_lock_bh(&pol->lock);
4516 if (unlikely(pol->walk.dead)) {
4517 /* target policy has been deleted */
4518 NL_SET_ERR_MSG(extack, "Target policy not found");
4519 write_unlock_bh(&pol->lock);
4520 return -ENOENT;
4521 }
4522
4523 for (i = 0; i < pol->xfrm_nr; i++) {
4524 for (j = 0, mp = m; j < num_migrate; j++, mp++) {
4525 if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
4526 continue;
4527 n++;
4528 if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
4529 pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
4530 continue;
4531 /* update endpoints */
4532 memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
4533 sizeof(pol->xfrm_vec[i].id.daddr));
4534 memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
4535 sizeof(pol->xfrm_vec[i].saddr));
4536 pol->xfrm_vec[i].encap_family = mp->new_family;
4537 /* flush bundles */
4538 atomic_inc(&pol->genid);
4539 }
4540 }
4541
4542 write_unlock_bh(&pol->lock);
4543
4544 if (!n)
4545 return -ENODATA;
4546
4547 return 0;
4548 }
4549
4550 static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate,
4551 struct netlink_ext_ack *extack)
4552 {
4553 int i, j;
4554
4555 if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH) {
4556 NL_SET_ERR_MSG(extack, "Invalid number of SAs to migrate, must be 0 < num <= XFRM_MAX_DEPTH (6)");
4557 return -EINVAL;
4558 }
4559
4560 for (i = 0; i < num_migrate; i++) {
4561 if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
4562 xfrm_addr_any(&m[i].new_saddr, m[i].new_family)) {
4563 NL_SET_ERR_MSG(extack, "Addresses in the MIGRATE attribute's list cannot be null");
4564 return -EINVAL;
4565 }
4566
4567 /* check if there is any duplicated entry */
4568 for (j = i + 1; j < num_migrate; j++) {
4569 if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
4570 sizeof(m[i].old_daddr)) &&
4571 !memcmp(&m[i].old_saddr, &m[j].old_saddr,
4572 sizeof(m[i].old_saddr)) &&
4573 m[i].proto == m[j].proto &&
4574 m[i].mode == m[j].mode &&
4575 m[i].reqid == m[j].reqid &&
4576 m[i].old_family == m[j].old_family) {
4577 NL_SET_ERR_MSG(extack, "Entries in the MIGRATE attribute's list must be unique");
4578 return -EINVAL;
4579 }
4580 }
4581 }
4582
4583 return 0;
4584 }
4585
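/* xfrm_migrate(): high-level flow (summary of the stages marked below):
 * validate the migrate list, find the policy matching @sel, clone and
 * re-address each matching state, rewrite the policy's templates, delete the
 * old states, and finally notify the key managers via km_migrate().
 */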
4586 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
4587 struct xfrm_migrate *m, int num_migrate,
4588 struct xfrm_kmaddress *k, struct net *net,
4589 struct xfrm_encap_tmpl *encap, u32 if_id,
4590 struct netlink_ext_ack *extack)
4591 {
4592 int i, err, nx_cur = 0, nx_new = 0;
4593 struct xfrm_policy *pol = NULL;
4594 struct xfrm_state *x, *xc;
4595 struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
4596 struct xfrm_state *x_new[XFRM_MAX_DEPTH];
4597 struct xfrm_migrate *mp;
4598
4599 /* Stage 0 - sanity checks */
4600 err = xfrm_migrate_check(m, num_migrate, extack);
4601 if (err < 0)
4602 goto out;
4603
4604 if (dir >= XFRM_POLICY_MAX) {
4605 NL_SET_ERR_MSG(extack, "Invalid policy direction");
4606 err = -EINVAL;
4607 goto out;
4608 }
4609
4610 /* Stage 1 - find policy */
4611 pol = xfrm_migrate_policy_find(sel, dir, type, net, if_id);
4612 if (IS_ERR_OR_NULL(pol)) {
4613 NL_SET_ERR_MSG(extack, "Target policy not found");
4614 err = IS_ERR(pol) ? PTR_ERR(pol) : -ENOENT;
4615 goto out;
4616 }
4617
4618 /* Stage 2 - find and update state(s) */
4619 for (i = 0, mp = m; i < num_migrate; i++, mp++) {
4620 if ((x = xfrm_migrate_state_find(mp, net, if_id))) {
4621 x_cur[nx_cur] = x;
4622 nx_cur++;
4623 xc = xfrm_state_migrate(x, mp, encap);
4624 if (xc) {
4625 x_new[nx_new] = xc;
4626 nx_new++;
4627 } else {
4628 err = -ENODATA;
4629 goto restore_state;
4630 }
4631 }
4632 }
4633
4634 /* Stage 3 - update policy */
4635 err = xfrm_policy_migrate(pol, m, num_migrate, extack);
4636 if (err < 0)
4637 goto restore_state;
4638
4639 /* Stage 4 - delete old state(s) */
4640 if (nx_cur) {
4641 xfrm_states_put(x_cur, nx_cur);
4642 xfrm_states_delete(x_cur, nx_cur);
4643 }
4644
4645 /* Stage 5 - announce */
4646 km_migrate(sel, dir, type, m, num_migrate, k, encap);
4647
4648 xfrm_pol_put(pol);
4649
4650 return 0;
4651 out:
4652 return err;
4653
4654 restore_state:
4655 if (pol)
4656 xfrm_pol_put(pol);
4657 if (nx_cur)
4658 xfrm_states_put(x_cur, nx_cur);
4659 if (nx_new)
4660 xfrm_states_delete(x_new, nx_new);
4661
4662 return err;
4663 }
4664 EXPORT_SYMBOL(xfrm_migrate);
4665 #endif
4666