// SPDX-License-Identifier: GPL-2.0-only
/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/cpu.h>
#include <linux/audit.h>
#include <linux/rhashtable.h>
#include <linux/if_tunnel.h>
#include <linux/icmp.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/gre.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/mip6.h>
#endif
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif
#ifdef CONFIG_XFRM_ESPINTCP
#include <net/espintcp.h>
#endif

#include "xfrm_hash.h"

#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
#define XFRM_MAX_QUEUE_LEN	100

struct xfrm_flo {
	struct dst_entry *dst_orig;
	u8 flags;
};

/* prefixes smaller than this are stored in lists, not trees. */
#define INEXACT_PREFIXLEN_IPV4	16
#define INEXACT_PREFIXLEN_IPV6	48

struct xfrm_pol_inexact_node {
	struct rb_node node;
	union {
		xfrm_address_t addr;
		struct rcu_head rcu;
	};
	u8 prefixlen;

	struct rb_root root;

	/* the policies matching this node, can be empty list */
	struct hlist_head hhead;
};

/* xfrm inexact policy search tree:
 * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
 *  |
 * +---- root_d: sorted by daddr:prefix
 * |                 |
 * |        xfrm_pol_inexact_node
 * |                 |
 * |                 +- root: sorted by saddr/prefix
 * |                 |              |
 * |                 |         xfrm_pol_inexact_node
 * |                 |              |
 * |                 |              + root: unused
 * |                 |              |
 * |                 |              + hhead: saddr:daddr policies
 * |                 |
 * |                 +- coarse policies and all any:daddr policies
 * |
 * +---- root_s: sorted by saddr:prefix
 * |                 |
 * |        xfrm_pol_inexact_node
 * |                 |
 * |                 + root: unused
 * |                 |
 * |                 + hhead: saddr:any policies
 * |
 * +---- coarse policies and all any:any policies
 *
 * Lookups return four candidate lists:
 * 1. any:any list from top-level xfrm_pol_inexact_bin
 * 2. any:daddr list from daddr tree
 * 3. saddr:daddr list from 2nd level daddr tree
 * 4. saddr:any list from saddr tree
 *
 * This result set then needs to be searched for the policy with
 * the lowest priority.  If two results have same prio, youngest one wins.
 */
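
/* Worked example (derived from the rules above): an AF_INET policy with
 * selector saddr 10.0.0.0/8 and daddr 192.0.2.0/24 has a source prefix
 * below INEXACT_PREFIXLEN_IPV4, so saddr counts as "any"; daddr does not,
 * so the policy lands on the hhead of a root_d node for 192.0.2.0/24,
 * i.e. candidate list 2 (any:daddr) during lookup.
 */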

struct xfrm_pol_inexact_key {
	possible_net_t net;
	u32 if_id;
	u16 family;
	u8 dir, type;
};

struct xfrm_pol_inexact_bin {
	struct xfrm_pol_inexact_key k;
	struct rhash_head head;
	/* list containing '*:*' policies */
	struct hlist_head hhead;

	seqcount_spinlock_t count;
	/* tree sorted by daddr/prefix */
	struct rb_root root_d;

	/* tree sorted by saddr/prefix */
	struct rb_root root_s;

	/* slow path below */
	struct list_head inexact_bins;
	struct rcu_head rcu;
};

enum xfrm_pol_inexact_candidate_type {
	XFRM_POL_CAND_BOTH,
	XFRM_POL_CAND_SADDR,
	XFRM_POL_CAND_DADDR,
	XFRM_POL_CAND_ANY,

	XFRM_POL_CAND_MAX,
};

struct xfrm_pol_inexact_candidates {
	struct hlist_head *res[XFRM_POL_CAND_MAX];
};

struct xfrm_flow_keys {
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_control control;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	} addrs;
	struct flow_dissector_key_ip ip;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_ports ports;
	struct flow_dissector_key_keyid gre;
};

static struct flow_dissector xfrm_session_dissector __ro_after_init;

static DEFINE_SPINLOCK(xfrm_if_cb_lock);
static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;

static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
						__read_mostly;

static struct kmem_cache *xfrm_dst_cache __ro_after_init;

static struct rhashtable xfrm_policy_inexact_table;
static const struct rhashtable_params xfrm_pol_inexact_params;

static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static void xfrm_policy_queue_process(struct timer_list *t);

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
			   u32 if_id);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup_rcu(struct net *net,
			       u8 type, u16 family, u8 dir, u32 if_id);
static struct xfrm_policy *
xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
			bool excl);
static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy);

static bool
xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
				    struct xfrm_pol_inexact_bin *b,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr);

static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
{
	return refcount_inc_not_zero(&policy->refcnt);
}

static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}
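
/* The port checks above are mask based: a dport_mask of 0xffff matches only
 * the exact port, while a mask of 0 matches any port. The IPv6 variant
 * below uses the same pattern.
 */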

static inline bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}

bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
			 unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return false;
}

static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	const struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
		return NULL;
	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

/* Called with rcu_read_lock(). */
static const struct xfrm_if_cb *xfrm_if_get_cb(void)
{
	return rcu_dereference(xfrm_if_cb);
}

struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr,
				    int family, u32 mark)
{
	const struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);

	rcu_read_unlock();

	return dst;
}
EXPORT_SYMBOL(__xfrm_dst_lookup);

static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
						int tos, int oif,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family, u32 mark)
{
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		saddr = x->coaddr;
		daddr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		saddr = prev_saddr;
		daddr = x->coaddr;
	}

	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
	}

	return dst;
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}

static void xfrm_policy_timer(struct timer_list *t)
{
	struct xfrm_policy *xp = from_timer(xp, t, timer);
	time64_t now = ktime_get_real_seconds();
	time64_t next = TIME64_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (unlikely(xp->walk.dead))
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		time64_t tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		time64_t tmo = xp->lft.hard_use_expires_seconds +
			(READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		time64_t tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		time64_t tmo = xp->lft.soft_use_expires_seconds +
			(READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != TIME64_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}

/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst_inexact_list);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		refcount_set(&policy->refcnt, 1);
		skb_queue_head_init(&policy->polq.hold_queue);
		timer_setup(&policy->timer, xfrm_policy_timer, 0);
		timer_setup(&policy->polq.hold_timer,
			    xfrm_policy_queue_process, 0);
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

static void xfrm_policy_destroy_rcu(struct rcu_head *head)
{
	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}

/* Destroy xfrm_policy: descendant resources must have been released by this point. */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);

	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
		BUG();

	xfrm_dev_policy_free(policy);
	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
}
EXPORT_SYMBOL(xfrm_policy_destroy);

/* Rule must be locked. Release descendant resources and announce the
 * entry dead. The rule must already be unlinked from the lists.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	write_lock_bh(&policy->lock);
	policy->walk.dead = 1;
	write_unlock_bh(&policy->lock);

	atomic_inc(&policy->genid);

	if (del_timer(&policy->polq.hold_timer))
		xfrm_pol_put(policy);
	skb_queue_purge(&policy->polq.hold_queue);

	if (del_timer(&policy->timer))
		xfrm_pol_put(policy);

	xfrm_pol_put(policy);
}

static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}

/* calculate policy hash thresholds */
static void __get_hash_thresh(struct net *net,
			      unsigned short family, int dir,
			      u8 *dbits, u8 *sbits)
{
	switch (family) {
	case AF_INET:
		*dbits = net->xfrm.policy_bydst[dir].dbits4;
		*sbits = net->xfrm.policy_bydst[dir].sbits4;
		break;

	case AF_INET6:
		*dbits = net->xfrm.policy_bydst[dir].dbits6;
		*sbits = net->xfrm.policy_bydst[dir].sbits6;
		break;

	default:
		*dbits = 0;
		*sbits = 0;
	}
}

static struct hlist_head *policy_hash_bysel(struct net *net,
					    const struct xfrm_selector *sel,
					    unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __sel_hash(sel, family, hmask, dbits, sbits);

	if (hash == hmask + 1)
		return NULL;

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}

static struct hlist_head *policy_hash_direct(struct net *net,
					     const xfrm_address_t *daddr,
					     const xfrm_address_t *saddr,
					     unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}

static void xfrm_dst_hash_transfer(struct net *net,
				   struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask,
				   int dir)
{
	struct hlist_node *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;
	u8 dbits;
	u8 sbits;

redo:
	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
		unsigned int h;

		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask, dbits, sbits);
		if (!entry0 || pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
			hlist_del_rcu(&pol->bydst);
			hlist_add_head_rcu(&pol->bydst, ndsttable + h);
			h0 = h;
		} else {
			if (h != h0)
				continue;
			hlist_del_rcu(&pol->bydst);
			hlist_add_behind_rcu(&pol->bydst, entry0);
		}
		entry0 = &pol->bydst;
	}
	if (!hlist_empty(list)) {
		entry0 = NULL;
		goto redo;
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}
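
/* Each resize doubles the number of buckets: an old hmask of 7 (8 buckets)
 * yields ((7 + 1) << 1) - 1 = 15, i.e. 16 buckets.
 */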
static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}

static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	struct hlist_head *odst;
	int i;

	if (!ndst)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);

	odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
				lockdep_is_held(&net->xfrm.xfrm_policy_lock));

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);

	rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	synchronize_rcu();

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(struct net *net)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = net->xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
}
EXPORT_SYMBOL(xfrm_spd_getinfo);

static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	}
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net);

	mutex_unlock(&hash_resize_mutex);
}

/* Make sure *pol can be inserted into fastbin.
 * Useful to check that later insert requests will be successful
 * (provided xfrm_policy_lock is held throughout).
 */
static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
{
	struct xfrm_pol_inexact_bin *bin, *prev;
	struct xfrm_pol_inexact_key k = {
		.family = pol->family,
		.type = pol->type,
		.dir = dir,
		.if_id = pol->if_id,
	};
	struct net *net = xp_net(pol);

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	write_pnet(&k.net, net);
	bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
				     xfrm_pol_inexact_params);
	if (bin)
		return bin;

	bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
	if (!bin)
		return NULL;

	bin->k = k;
	INIT_HLIST_HEAD(&bin->hhead);
	bin->root_d = RB_ROOT;
	bin->root_s = RB_ROOT;
	seqcount_spinlock_init(&bin->count, &net->xfrm.xfrm_policy_lock);

	prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
						&bin->k, &bin->head,
						xfrm_pol_inexact_params);
	if (!prev) {
		list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
		return bin;
	}

	kfree(bin);

	return IS_ERR(prev) ? NULL : prev;
}

static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
					       int family, u8 prefixlen)
{
	if (xfrm_addr_any(addr, family))
		return true;

	if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
		return true;

	if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
		return true;

	return false;
}

static bool
xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
{
	const xfrm_address_t *addr;
	bool saddr_any, daddr_any;
	u8 prefixlen;

	addr = &policy->selector.saddr;
	prefixlen = policy->selector.prefixlen_s;

	saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
						       policy->family,
						       prefixlen);
	addr = &policy->selector.daddr;
	prefixlen = policy->selector.prefixlen_d;
	daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
						       policy->family,
						       prefixlen);
	return saddr_any && daddr_any;
}

static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
				       const xfrm_address_t *addr, u8 prefixlen)
{
	node->addr = *addr;
	node->prefixlen = prefixlen;
}

static struct xfrm_pol_inexact_node *
xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
{
	struct xfrm_pol_inexact_node *node;

	node = kzalloc(sizeof(*node), GFP_ATOMIC);
	if (node)
		xfrm_pol_inexact_node_init(node, addr, prefixlen);

	return node;
}

static int xfrm_policy_addr_delta(const xfrm_address_t *a,
				  const xfrm_address_t *b,
				  u8 prefixlen, u16 family)
{
	u32 ma, mb, mask;
	unsigned int pdw, pbi;
	int delta = 0;

	switch (family) {
	case AF_INET:
		if (prefixlen == 0)
			return 0;
		mask = ~0U << (32 - prefixlen);
		ma = ntohl(a->a4) & mask;
		mb = ntohl(b->a4) & mask;
		if (ma < mb)
			delta = -1;
		else if (ma > mb)
			delta = 1;
		break;
	case AF_INET6:
		pdw = prefixlen >> 5;
		pbi = prefixlen & 0x1f;

		if (pdw) {
			delta = memcmp(a->a6, b->a6, pdw << 2);
			if (delta)
				return delta;
		}
		if (pbi) {
			mask = ~0U << (32 - pbi);
			ma = ntohl(a->a6[pdw]) & mask;
			mb = ntohl(b->a6[pdw]) & mask;
			if (ma < mb)
				delta = -1;
			else if (ma > mb)
				delta = 1;
		}
		break;
	default:
		break;
	}

	return delta;
}
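
/* xfrm_policy_addr_delta() compares only the masked prefix: with prefixlen
 * 16, 10.1.2.3 and 10.1.200.200 both reduce to 10.1.0.0 and compare equal
 * (delta 0), so the tree order built on it is an order on prefixes, not on
 * full addresses.
 */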

static void xfrm_policy_inexact_list_reinsert(struct net *net,
					      struct xfrm_pol_inexact_node *n,
					      u16 family)
{
	unsigned int matched_s, matched_d;
	struct xfrm_policy *policy, *p;

	matched_s = 0;
	matched_d = 0;

	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		struct hlist_node *newpos = NULL;
		bool matches_s, matches_d;

		if (policy->walk.dead || !policy->bydst_reinsert)
			continue;

		WARN_ON_ONCE(policy->family != family);

		policy->bydst_reinsert = false;
		hlist_for_each_entry(p, &n->hhead, bydst) {
			if (policy->priority > p->priority)
				newpos = &p->bydst;
			else if (policy->priority == p->priority &&
				 policy->pos > p->pos)
				newpos = &p->bydst;
			else
				break;
		}

		if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
			hlist_add_behind_rcu(&policy->bydst, newpos);
		else
			hlist_add_head_rcu(&policy->bydst, &n->hhead);

		/* paranoia checks follow.
		 * Check that the reinserted policy matches at least
		 * saddr or daddr for current node prefix.
		 *
		 * Matching both is fine, matching saddr in one policy
		 * (but not daddr) and then matching only daddr in another
		 * is a bug.
		 */
		matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
						   &n->addr,
						   n->prefixlen,
						   family) == 0;
		matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
						   &n->addr,
						   n->prefixlen,
						   family) == 0;
		if (matches_s && matches_d)
			continue;

		WARN_ON_ONCE(!matches_s && !matches_d);
		if (matches_s)
			matched_s++;
		if (matches_d)
			matched_d++;
		WARN_ON_ONCE(matched_s && matched_d);
	}
}

static void xfrm_policy_inexact_node_reinsert(struct net *net,
					      struct xfrm_pol_inexact_node *n,
					      struct rb_root *new,
					      u16 family)
{
	struct xfrm_pol_inexact_node *node;
	struct rb_node **p, *parent;

	/* we should not have another subtree here */
	WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
restart:
	parent = NULL;
	p = &new->rb_node;
	while (*p) {
		u8 prefixlen;
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		prefixlen = min(node->prefixlen, n->prefixlen);

		delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
					       prefixlen, family);
		if (delta < 0) {
			p = &parent->rb_left;
		} else if (delta > 0) {
			p = &parent->rb_right;
		} else {
			bool same_prefixlen = node->prefixlen == n->prefixlen;
			struct xfrm_policy *tmp;

			hlist_for_each_entry(tmp, &n->hhead, bydst) {
				tmp->bydst_reinsert = true;
				hlist_del_rcu(&tmp->bydst);
			}

			node->prefixlen = prefixlen;

			xfrm_policy_inexact_list_reinsert(net, node, family);

			if (same_prefixlen) {
				kfree_rcu(n, rcu);
				return;
			}

			rb_erase(*p, new);
			kfree_rcu(n, rcu);
			n = node;
			goto restart;
		}
	}

	rb_link_node_rcu(&n->node, parent, p);
	rb_insert_color(&n->node, new);
}

/* merge nodes v and n */
static void xfrm_policy_inexact_node_merge(struct net *net,
					   struct xfrm_pol_inexact_node *v,
					   struct xfrm_pol_inexact_node *n,
					   u16 family)
{
	struct xfrm_pol_inexact_node *node;
	struct xfrm_policy *tmp;
	struct rb_node *rnode;

	/* To-be-merged node v has a subtree.
	 *
	 * Dismantle it and insert its nodes to n->root.
	 */
	while ((rnode = rb_first(&v->root)) != NULL) {
		node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
		rb_erase(&node->node, &v->root);
		xfrm_policy_inexact_node_reinsert(net, node, &n->root,
						  family);
	}

	hlist_for_each_entry(tmp, &v->hhead, bydst) {
		tmp->bydst_reinsert = true;
		hlist_del_rcu(&tmp->bydst);
	}

	xfrm_policy_inexact_list_reinsert(net, n, family);
}

static struct xfrm_pol_inexact_node *
xfrm_policy_inexact_insert_node(struct net *net,
				struct rb_root *root,
				xfrm_address_t *addr,
				u16 family, u8 prefixlen, u8 dir)
{
	struct xfrm_pol_inexact_node *cached = NULL;
	struct rb_node **p, *parent = NULL;
	struct xfrm_pol_inexact_node *node;

	p = &root->rb_node;
	while (*p) {
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		delta = xfrm_policy_addr_delta(addr, &node->addr,
					       node->prefixlen,
					       family);
		if (delta == 0 && prefixlen >= node->prefixlen) {
			WARN_ON_ONCE(cached); /* ipsec policies got lost */
			return node;
		}

		if (delta < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;

		if (prefixlen < node->prefixlen) {
			delta = xfrm_policy_addr_delta(addr, &node->addr,
						       prefixlen,
						       family);
			if (delta)
				continue;

			/* This node is a subnet of the new prefix. It needs
			 * to be removed and re-inserted with the smaller
			 * prefix and all nodes that are now also covered
			 * by the reduced prefixlen.
			 */
			rb_erase(&node->node, root);

			if (!cached) {
				xfrm_pol_inexact_node_init(node, addr,
							   prefixlen);
				cached = node;
			} else {
				/* This node also falls within the new
				 * prefixlen. Merge the to-be-reinserted
				 * node and this one.
				 */
				xfrm_policy_inexact_node_merge(net, node,
							       cached, family);
				kfree_rcu(node, rcu);
			}

			/* restart */
			p = &root->rb_node;
			parent = NULL;
		}
	}

	node = cached;
	if (!node) {
		node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
		if (!node)
			return NULL;
	}

	rb_link_node_rcu(&node->node, parent, p);
	rb_insert_color(&node->node, root);

	return node;
}

static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
{
	struct xfrm_pol_inexact_node *node;
	struct rb_node *rn = rb_first(r);

	while (rn) {
		node = rb_entry(rn, struct xfrm_pol_inexact_node, node);

		xfrm_policy_inexact_gc_tree(&node->root, rm);
		rn = rb_next(rn);

		if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
			WARN_ON_ONCE(rm);
			continue;
		}

		rb_erase(&node->node, r);
		kfree_rcu(node, rcu);
	}
}

static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
{
	write_seqcount_begin(&b->count);
	xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
	xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
	write_seqcount_end(&b->count);

	if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
	    !hlist_empty(&b->hhead)) {
		WARN_ON_ONCE(net_exit);
		return;
	}

	if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
				   xfrm_pol_inexact_params) == 0) {
		list_del(&b->inexact_bins);
		kfree_rcu(b, rcu);
	}
}

static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
{
	struct net *net = read_pnet(&b->k.net);

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	__xfrm_policy_inexact_prune_bin(b, false);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}

static void __xfrm_policy_inexact_flush(struct net *net)
{
	struct xfrm_pol_inexact_bin *bin, *t;

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
		__xfrm_policy_inexact_prune_bin(bin, false);
}

static struct hlist_head *
xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
				struct xfrm_policy *policy, u8 dir)
{
	struct xfrm_pol_inexact_node *n;
	struct net *net;

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	if (xfrm_policy_inexact_insert_use_any_list(policy))
		return &bin->hhead;

	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
					       policy->family,
					       policy->selector.prefixlen_d)) {
		write_seqcount_begin(&bin->count);
		n = xfrm_policy_inexact_insert_node(net,
						    &bin->root_s,
						    &policy->selector.saddr,
						    policy->family,
						    policy->selector.prefixlen_s,
						    dir);
		write_seqcount_end(&bin->count);
		if (!n)
			return NULL;

		return &n->hhead;
	}

	/* daddr is fixed */
	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &bin->root_d,
					    &policy->selector.daddr,
					    policy->family,
					    policy->selector.prefixlen_d, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	/* saddr is wildcard */
	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
					       policy->family,
					       policy->selector.prefixlen_s))
		return &n->hhead;
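
	/* both daddr and saddr are fixed: hang a second-level saddr tree
	 * off the daddr node and store the policy there (the saddr:daddr
	 * candidate list)
	 */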
	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &n->root,
					    &policy->selector.saddr,
					    policy->family,
					    policy->selector.prefixlen_s, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	return &n->hhead;
}

static struct xfrm_policy *
xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
{
	struct xfrm_pol_inexact_bin *bin;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct net *net;

	bin = xfrm_policy_inexact_alloc_bin(policy, dir);
	if (!bin)
		return ERR_PTR(-ENOMEM);

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
	if (!chain) {
		__xfrm_policy_inexact_prune_bin(bin, false);
		return ERR_PTR(-ENOMEM);
	}

	delpol = xfrm_policy_insert_list(chain, policy, excl);
	if (delpol && excl) {
		__xfrm_policy_inexact_prune_bin(bin, false);
		return ERR_PTR(-EEXIST);
	}

	chain = &net->xfrm.policy_inexact[dir];
	xfrm_policy_insert_inexact_list(chain, policy);

	if (delpol)
		__xfrm_policy_inexact_prune_bin(bin, false);

	return delpol;
}

static void xfrm_hash_rebuild(struct work_struct *work)
{
	struct net *net = container_of(work, struct net,
				       xfrm.policy_hthresh.work);
	unsigned int hmask;
	struct xfrm_policy *pol;
	struct xfrm_policy *policy;
	struct hlist_head *chain;
	struct hlist_head *odst;
	struct hlist_node *newpos;
	int i;
	int dir;
	unsigned seq;
	u8 lbits4, rbits4, lbits6, rbits6;

	mutex_lock(&hash_resize_mutex);

	/* read selector prefixlen thresholds */
	do {
		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);

		lbits4 = net->xfrm.policy_hthresh.lbits4;
		rbits4 = net->xfrm.policy_hthresh.rbits4;
		lbits6 = net->xfrm.policy_hthresh.lbits6;
		rbits6 = net->xfrm.policy_hthresh.rbits6;
	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);

	/* make sure that we can insert the indirect policies again before
	 * we start with destructive action.
	 */
	list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
		struct xfrm_pol_inexact_bin *bin;
		u8 dbits, sbits;

		if (policy->walk.dead)
			continue;

		dir = xfrm_policy_id2dir(policy->index);
		if (dir >= XFRM_POLICY_MAX)
			continue;

		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			if (policy->family == AF_INET) {
				dbits = rbits4;
				sbits = lbits4;
			} else {
				dbits = rbits6;
				sbits = lbits6;
			}
		} else {
			if (policy->family == AF_INET) {
				dbits = lbits4;
				sbits = rbits4;
			} else {
				dbits = lbits6;
				sbits = rbits6;
			}
		}

		if (policy->selector.prefixlen_d < dbits ||
		    policy->selector.prefixlen_s < sbits)
			continue;

		bin = xfrm_policy_inexact_alloc_bin(policy, dir);
		if (!bin)
			goto out_unlock;

		if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
			goto out_unlock;
	}

	/* reset the bydst and inexact table in all directions */
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct hlist_node *n;

		hlist_for_each_entry_safe(policy, n,
					  &net->xfrm.policy_inexact[dir],
					  bydst_inexact_list) {
			hlist_del_rcu(&policy->bydst);
			hlist_del_init(&policy->bydst_inexact_list);
		}

		hmask = net->xfrm.policy_bydst[dir].hmask;
		odst = net->xfrm.policy_bydst[dir].table;
		for (i = hmask; i >= 0; i--) {
			hlist_for_each_entry_safe(policy, n, odst + i, bydst)
				hlist_del_rcu(&policy->bydst);
		}
		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			/* dir out => dst = remote, src = local */
			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
		} else {
			/* dir in/fwd => dst = local, src = remote */
			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
		}
	}

	/* re-insert all policies by order of creation */
	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		if (policy->walk.dead)
			continue;
		dir = xfrm_policy_id2dir(policy->index);
		if (dir >= XFRM_POLICY_MAX) {
			/* skip socket policies */
			continue;
		}
		newpos = NULL;
		chain = policy_hash_bysel(net, &policy->selector,
					  policy->family, dir);

		if (!chain) {
			void *p = xfrm_policy_inexact_insert(policy, dir, 0);

			WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
			continue;
		}

		hlist_for_each_entry(pol, chain, bydst) {
			if (policy->priority >= pol->priority)
				newpos = &pol->bydst;
			else
				break;
		}
		if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
			hlist_add_behind_rcu(&policy->bydst, newpos);
		else
			hlist_add_head_rcu(&policy->bydst, chain);
	}

out_unlock:
	__xfrm_policy_inexact_flush(net);
	write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	mutex_unlock(&hash_resize_mutex);
}

void xfrm_policy_hash_rebuild(struct net *net)
{
	schedule_work(&net->xfrm.policy_hthresh.work);
}
EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
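
/* A policy index encodes its direction in the low bits: xfrm_gen_index()
 * below ORs dir into the index and advances the generator in steps of 8,
 * which is what lets xfrm_policy_id2dir() recover the direction from the
 * index alone.
 */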
/* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute unpredictability of ordering of rules. This will not pass. */
static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
{
	for (;;) {
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		if (!index) {
			idx = (net->xfrm.idx_generator | dir);
			net->xfrm.idx_generator += 8;
		} else {
			idx = index;
			index = 0;
		}

		if (idx == 0)
			idx = 8;
		list = net->xfrm.policy_byidx + idx_hash(net, idx);
		found = 0;
		hlist_for_each_entry(p, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}

static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}

static void xfrm_policy_requeue(struct xfrm_policy *old,
				struct xfrm_policy *new)
{
	struct xfrm_policy_queue *pq = &old->polq;
	struct sk_buff_head list;

	if (skb_queue_empty(&pq->hold_queue))
		return;

	__skb_queue_head_init(&list);

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice_init(&pq->hold_queue, &list);
	if (del_timer(&pq->hold_timer))
		xfrm_pol_put(old);
	spin_unlock_bh(&pq->hold_queue.lock);

	pq = &new->polq;

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice(&list, &pq->hold_queue);
	pq->timeout = XFRM_QUEUE_TMO_MIN;
	if (!mod_timer(&pq->hold_timer, jiffies))
		xfrm_pol_hold(new);
	spin_unlock_bh(&pq->hold_queue.lock);
}

static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark,
					  struct xfrm_policy *pol)
{
	return mark->v == pol->mark.v && mark->m == pol->mark.m;
}

static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
{
	const struct xfrm_pol_inexact_key *k = data;
	u32 a = k->type << 24 | k->dir << 16 | k->family;

	return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
			    seed);
}

static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
{
	const struct xfrm_pol_inexact_bin *b = data;

	return xfrm_pol_bin_key(&b->k, 0, seed);
}

static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
			    const void *ptr)
{
	const struct xfrm_pol_inexact_key *key = arg->key;
	const struct xfrm_pol_inexact_bin *b = ptr;
	int ret;

	if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
		return -1;

	ret = b->k.dir ^ key->dir;
	if (ret)
		return ret;

	ret = b->k.type ^ key->type;
	if (ret)
		return ret;

	ret = b->k.family ^ key->family;
	if (ret)
		return ret;

	return b->k.if_id ^ key->if_id;
}

static const struct rhashtable_params xfrm_pol_inexact_params = {
	.head_offset		= offsetof(struct xfrm_pol_inexact_bin, head),
	.hashfn			= xfrm_pol_bin_key,
	.obj_hashfn		= xfrm_pol_bin_obj,
	.obj_cmpfn		= xfrm_pol_bin_cmp,
	.automatic_shrinking	= true,
};
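
/* After every insertion the whole chain is renumbered below so that
 * pol->pos reflects chain order; pos is the tiebreaker when two matching
 * policies share a priority (lower pos wins; for equal priorities that is
 * normally the older entry).
 */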
static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy)
{
	struct xfrm_policy *pol, *delpol = NULL;
	struct hlist_node *newpos = NULL;
	int i = 0;

	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		if (pol->type == policy->type &&
		    pol->if_id == policy->if_id &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(&policy->mark, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst_inexact_list;
			continue;
		}
		if (delpol)
			break;
	}

	if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
		hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
	else
		hlist_add_head_rcu(&policy->bydst_inexact_list, chain);

	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		pol->pos = i;
		i++;
	}
}

static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
						   struct xfrm_policy *policy,
						   bool excl)
{
	struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;

	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == policy->type &&
		    pol->if_id == policy->if_id &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(&policy->mark, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl)
				return ERR_PTR(-EEXIST);
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = pol;
			continue;
		}
		if (delpol)
			break;
	}

	if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
		hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
	else
		/* Packet offload policies enter at the head
		 * to speed up lookups.
		 */
		hlist_add_head_rcu(&policy->bydst, chain);

	return delpol;
}

int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct net *net = xp_net(policy);
	struct xfrm_policy *delpol;
	struct hlist_head *chain;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
	if (chain)
		delpol = xfrm_policy_insert_list(chain, policy, excl);
	else
		delpol = xfrm_policy_inexact_insert(policy, dir, excl);

	if (IS_ERR(delpol)) {
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
		return PTR_ERR(delpol);
	}

	__xfrm_policy_link(policy, dir);

	/* After previous checking, family can either be AF_INET or AF_INET6 */
	if (policy->family == AF_INET)
		rt_genid_bump_ipv4(net);
	else
		rt_genid_bump_ipv6(net);

	if (delpol) {
		xfrm_policy_requeue(delpol, policy);
		__xfrm_policy_unlink(delpol, dir);
	}
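	/* when replacing an existing policy, keep its index; otherwise
	 * generate a fresh one (or validate a caller-supplied index)
	 */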
	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
	policy->curlft.add_time = ktime_get_real_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

static struct xfrm_policy *
__xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark,
			u32 if_id, u8 type, int dir, struct xfrm_selector *sel,
			struct xfrm_sec_ctx *ctx)
{
	struct xfrm_policy *pol;

	if (!chain)
		return NULL;

	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == type &&
		    pol->if_id == if_id &&
		    xfrm_policy_mark_match(mark, pol) &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security))
			return pol;
	}

	return NULL;
}

struct xfrm_policy *
xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id,
		      u8 type, int dir, struct xfrm_selector *sel,
		      struct xfrm_sec_ctx *ctx, int delete, int *err)
{
	struct xfrm_pol_inexact_bin *bin = NULL;
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;

	*err = 0;
	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, sel, sel->family, dir);
	if (!chain) {
		struct xfrm_pol_inexact_candidates cand;
		int i;

		bin = xfrm_policy_inexact_lookup(net, type,
						 sel->family, dir, if_id);
		if (!bin) {
			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
			return NULL;
		}

		if (!xfrm_policy_find_inexact_candidates(&cand, bin,
							 &sel->saddr,
							 &sel->daddr)) {
			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
			return NULL;
		}

		pol = NULL;
		for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
			struct xfrm_policy *tmp;

			tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
						      if_id, type, dir,
						      sel, ctx);
			if (!tmp)
				continue;

			if (!pol || tmp->pos < pol->pos)
				pol = tmp;
		}
	} else {
		pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
					      sel, ctx);
	}

	if (pol) {
		xfrm_pol_hold(pol);
		if (delete) {
			*err = security_xfrm_policy_delete(pol->security);
			if (*err) {
				spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
				return pol;
			}
			__xfrm_policy_unlink(pol, dir);
		}
		ret = pol;
	}
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	if (bin && delete)
		xfrm_policy_inexact_prune_bin(bin);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *
xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id,
		 u8 type, int dir, u32 id, int delete, int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;

	*err = -ENOENT;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = net->xfrm.policy_byidx + idx_hash(net, id);
	ret = NULL;
	hlist_for_each_entry(pol, chain, byidx) {
		if (pol->type == type && pol->index == id &&
		    pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
	struct xfrm_policy *pol;
	int err = 0;

	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
		if (pol->walk.dead ||
		    xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
		    pol->type != type)
			continue;

		err = security_xfrm_policy_delete(pol->security);
		if (err) {
			xfrm_audit_policy_delete(pol, 0, task_valid);
			return err;
		}
	}
	return err;
}

static inline int xfrm_dev_policy_flush_secctx_check(struct net *net,
						     struct net_device *dev,
						     bool task_valid)
{
	struct xfrm_policy *pol;
	int err = 0;

	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
		if (pol->walk.dead ||
		    xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
		    pol->xdo.dev != dev)
			continue;

		err = security_xfrm_policy_delete(pol->security);
		if (err) {
			xfrm_audit_policy_delete(pol, 0, task_valid);
			return err;
		}
	}
	return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
	return 0;
}

static inline int xfrm_dev_policy_flush_secctx_check(struct net *net,
						     struct net_device *dev,
						     bool task_valid)
{
	return 0;
}
#endif

int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
{
	int dir, err = 0, cnt = 0;
	struct xfrm_policy *pol;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
	if (err)
		goto out;

again:
	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
		if (pol->walk.dead)
			continue;

		dir = xfrm_policy_id2dir(pol->index);
		if (dir >= XFRM_POLICY_MAX ||
		    pol->type != type)
			continue;

		__xfrm_policy_unlink(pol, dir);
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
		xfrm_dev_policy_delete(pol);
		cnt++;
		xfrm_audit_policy_delete(pol, 1, task_valid);
		xfrm_policy_kill(pol);
		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
		goto again;
	}
	if (cnt)
		__xfrm_policy_inexact_flush(net);
	else
		err = -ESRCH;
out:
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);

int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
			  bool task_valid)
{
	int dir, err = 0, cnt = 0;
	struct xfrm_policy *pol;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	err = xfrm_dev_policy_flush_secctx_check(net, dev, task_valid);
	if (err)
		goto out;

again:
	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
		if (pol->walk.dead)
			continue;

		dir = xfrm_policy_id2dir(pol->index);
		if (dir >= XFRM_POLICY_MAX ||
		    pol->xdo.dev != dev)
			continue;

		__xfrm_policy_unlink(pol, dir);
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
		xfrm_dev_policy_delete(pol);
		cnt++;
		xfrm_audit_policy_delete(pol, 1, task_valid);
		xfrm_policy_kill(pol);
		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
		goto again;
	}
	if (cnt)
		__xfrm_policy_inexact_flush(net);
	else
		err = -ESRCH;
out:
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_dev_policy_flush);

int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
		     int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol;
	struct xfrm_policy_walk_entry *x;
	int error = 0;

	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
	    walk->type != XFRM_POLICY_TYPE_ANY)
		return -EINVAL;

	if (list_empty(&walk->walk.all) && walk->seq != 0)
		return 0;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	if (list_empty(&walk->walk.all))
		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
	else
		x = list_first_entry(&walk->walk.all,
				     struct xfrm_policy_walk_entry, all);

	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
		if (x->dead)
			continue;
		pol = container_of(x, struct xfrm_policy, walk);
		if (walk->type != XFRM_POLICY_TYPE_ANY &&
		    walk->type != pol->type)
			continue;
		error = func(pol, xfrm_policy_id2dir(pol->index),
			     walk->seq, data);
		if (error) {
			list_move_tail(&walk->walk.all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		error = -ENOENT;
		goto out;
	}
	list_del_init(&walk->walk.all);
out:
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
{
	INIT_LIST_HEAD(&walk->walk.all);
	walk->walk.dead = 1;
	walk->type = type;
	walk->seq = 0;
}
EXPORT_SYMBOL(xfrm_policy_walk_init);

void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
{
	if (list_empty(&walk->walk.all))
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
	list_del(&walk->walk.all);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);

/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
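/* -ESRCH from xfrm_policy_match() means "no match"; callers skip the
 * policy and keep scanning, while any other error aborts the lookup.
 */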
static int xfrm_policy_match(const struct xfrm_policy *pol,
			     const struct flowi *fl,
			     u8 type, u16 family, u32 if_id)
{
	const struct xfrm_selector *sel = &pol->selector;
	int ret = -ESRCH;
	bool match;

	if (pol->family != family ||
	    pol->if_id != if_id ||
	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid);
	return ret;
}

static struct xfrm_pol_inexact_node *
xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
				seqcount_spinlock_t *count,
				const xfrm_address_t *addr, u16 family)
{
	const struct rb_node *parent;
	int seq;

again:
	seq = read_seqcount_begin(count);

	parent = rcu_dereference_raw(r->rb_node);
	while (parent) {
		struct xfrm_pol_inexact_node *node;
		int delta;

		node = rb_entry(parent, struct xfrm_pol_inexact_node, node);

		delta = xfrm_policy_addr_delta(addr, &node->addr,
					       node->prefixlen, family);
		if (delta < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			continue;
		} else if (delta > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
			continue;
		}

		return node;
	}

	if (read_seqcount_retry(count, seq))
		goto again;

	return NULL;
}

static bool
xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
				    struct xfrm_pol_inexact_bin *b,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr)
{
	struct xfrm_pol_inexact_node *n;
	u16 family;

	if (!b)
		return false;

	family = b->k.family;
	memset(cand, 0, sizeof(*cand));
	cand->res[XFRM_POL_CAND_ANY] = &b->hhead;

	n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
					    family);
	if (n) {
		cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
		n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
						    family);
		if (n)
			cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
	}

	n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
					    family);
	if (n)
		cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;

	return true;
}

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
			       u8 dir, u32 if_id)
{
	struct xfrm_pol_inexact_key k = {
		.family = family,
		.type = type,
		.dir = dir,
		.if_id = if_id,
	};

	write_pnet(&k.net, net);

	return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
				 xfrm_pol_inexact_params);
}

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
			   u8 dir, u32 if_id)
{
	struct xfrm_pol_inexact_bin *bin;

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	rcu_read_lock();
	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
	rcu_read_unlock();

	return bin;
}
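
/* Walk one candidate chain; the chain is sorted by priority, so the scan
 * can stop at the first entry whose priority exceeds the current best.
 * On a priority tie the entry with the lower pos (the older one) wins.
 */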
prefer->priority : ~0u; 2114 struct xfrm_policy *pol; 2115 2116 if (!chain) 2117 return NULL; 2118 2119 hlist_for_each_entry_rcu(pol, chain, bydst) { 2120 int err; 2121 2122 if (pol->priority > priority) 2123 break; 2124 2125 err = xfrm_policy_match(pol, fl, type, family, if_id); 2126 if (err) { 2127 if (err != -ESRCH) 2128 return ERR_PTR(err); 2129 2130 continue; 2131 } 2132 2133 if (prefer) { 2134 /* matches. Is it older than *prefer? */ 2135 if (pol->priority == priority && 2136 prefer->pos < pol->pos) 2137 return prefer; 2138 } 2139 2140 return pol; 2141 } 2142 2143 return NULL; 2144 } 2145 2146 static struct xfrm_policy * 2147 xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand, 2148 struct xfrm_policy *prefer, 2149 const struct flowi *fl, 2150 u8 type, u16 family, u32 if_id) 2151 { 2152 struct xfrm_policy *tmp; 2153 int i; 2154 2155 for (i = 0; i < ARRAY_SIZE(cand->res); i++) { 2156 tmp = __xfrm_policy_eval_candidates(cand->res[i], 2157 prefer, 2158 fl, type, family, if_id); 2159 if (!tmp) 2160 continue; 2161 2162 if (IS_ERR(tmp)) 2163 return tmp; 2164 prefer = tmp; 2165 } 2166 2167 return prefer; 2168 } 2169 2170 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type, 2171 const struct flowi *fl, 2172 u16 family, u8 dir, 2173 u32 if_id) 2174 { 2175 struct xfrm_pol_inexact_candidates cand; 2176 const xfrm_address_t *daddr, *saddr; 2177 struct xfrm_pol_inexact_bin *bin; 2178 struct xfrm_policy *pol, *ret; 2179 struct hlist_head *chain; 2180 unsigned int sequence; 2181 int err; 2182 2183 daddr = xfrm_flowi_daddr(fl, family); 2184 saddr = xfrm_flowi_saddr(fl, family); 2185 if (unlikely(!daddr || !saddr)) 2186 return NULL; 2187 2188 rcu_read_lock(); 2189 retry: 2190 do { 2191 sequence = read_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation); 2192 chain = policy_hash_direct(net, daddr, saddr, family, dir); 2193 } while (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence)); 2194 2195 ret = NULL; 2196 hlist_for_each_entry_rcu(pol, chain, bydst) { 2197 err = xfrm_policy_match(pol, fl, type, family, if_id); 2198 if (err) { 2199 if (err == -ESRCH) 2200 continue; 2201 else { 2202 ret = ERR_PTR(err); 2203 goto fail; 2204 } 2205 } else { 2206 ret = pol; 2207 break; 2208 } 2209 } 2210 if (ret && ret->xdo.type == XFRM_DEV_OFFLOAD_PACKET) 2211 goto skip_inexact; 2212 2213 bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id); 2214 if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr, 2215 daddr)) 2216 goto skip_inexact; 2217 2218 pol = xfrm_policy_eval_candidates(&cand, ret, fl, type, 2219 family, if_id); 2220 if (pol) { 2221 ret = pol; 2222 if (IS_ERR(pol)) 2223 goto fail; 2224 } 2225 2226 skip_inexact: 2227 if (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence)) 2228 goto retry; 2229 2230 if (ret && !xfrm_pol_hold_rcu(ret)) 2231 goto retry; 2232 fail: 2233 rcu_read_unlock(); 2234 2235 return ret; 2236 } 2237 2238 static struct xfrm_policy *xfrm_policy_lookup(struct net *net, 2239 const struct flowi *fl, 2240 u16 family, u8 dir, u32 if_id) 2241 { 2242 #ifdef CONFIG_XFRM_SUB_POLICY 2243 struct xfrm_policy *pol; 2244 2245 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, 2246 dir, if_id); 2247 if (pol != NULL) 2248 return pol; 2249 #endif 2250 return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, 2251 dir, if_id); 2252 } 2253 2254 static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir, 2255 const struct flowi *fl, 2256 u16 
family, u32 if_id) 2257 { 2258 struct xfrm_policy *pol; 2259 2260 rcu_read_lock(); 2261 again: 2262 pol = rcu_dereference(sk->sk_policy[dir]); 2263 if (pol != NULL) { 2264 bool match; 2265 int err = 0; 2266 2267 if (pol->family != family) { 2268 pol = NULL; 2269 goto out; 2270 } 2271 2272 match = xfrm_selector_match(&pol->selector, fl, family); 2273 if (match) { 2274 if ((READ_ONCE(sk->sk_mark) & pol->mark.m) != pol->mark.v || 2275 pol->if_id != if_id) { 2276 pol = NULL; 2277 goto out; 2278 } 2279 err = security_xfrm_policy_lookup(pol->security, 2280 fl->flowi_secid); 2281 if (!err) { 2282 if (!xfrm_pol_hold_rcu(pol)) 2283 goto again; 2284 } else if (err == -ESRCH) { 2285 pol = NULL; 2286 } else { 2287 pol = ERR_PTR(err); 2288 } 2289 } else 2290 pol = NULL; 2291 } 2292 out: 2293 rcu_read_unlock(); 2294 return pol; 2295 } 2296 2297 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir) 2298 { 2299 struct net *net = xp_net(pol); 2300 2301 list_add(&pol->walk.all, &net->xfrm.policy_all); 2302 net->xfrm.policy_count[dir]++; 2303 xfrm_pol_hold(pol); 2304 } 2305 2306 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, 2307 int dir) 2308 { 2309 struct net *net = xp_net(pol); 2310 2311 if (list_empty(&pol->walk.all)) 2312 return NULL; 2313 2314 /* Socket policies are not hashed. */ 2315 if (!hlist_unhashed(&pol->bydst)) { 2316 hlist_del_rcu(&pol->bydst); 2317 hlist_del_init(&pol->bydst_inexact_list); 2318 hlist_del(&pol->byidx); 2319 } 2320 2321 list_del_init(&pol->walk.all); 2322 net->xfrm.policy_count[dir]--; 2323 2324 return pol; 2325 } 2326 2327 static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir) 2328 { 2329 __xfrm_policy_link(pol, XFRM_POLICY_MAX + dir); 2330 } 2331 2332 static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir) 2333 { 2334 __xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir); 2335 } 2336 2337 int xfrm_policy_delete(struct xfrm_policy *pol, int dir) 2338 { 2339 struct net *net = xp_net(pol); 2340 2341 spin_lock_bh(&net->xfrm.xfrm_policy_lock); 2342 pol = __xfrm_policy_unlink(pol, dir); 2343 spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 2344 if (pol) { 2345 xfrm_dev_policy_delete(pol); 2346 xfrm_policy_kill(pol); 2347 return 0; 2348 } 2349 return -ENOENT; 2350 } 2351 EXPORT_SYMBOL(xfrm_policy_delete); 2352 2353 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol) 2354 { 2355 struct net *net = sock_net(sk); 2356 struct xfrm_policy *old_pol; 2357 2358 #ifdef CONFIG_XFRM_SUB_POLICY 2359 if (pol && pol->type != XFRM_POLICY_TYPE_MAIN) 2360 return -EINVAL; 2361 #endif 2362 2363 spin_lock_bh(&net->xfrm.xfrm_policy_lock); 2364 old_pol = rcu_dereference_protected(sk->sk_policy[dir], 2365 lockdep_is_held(&net->xfrm.xfrm_policy_lock)); 2366 if (pol) { 2367 pol->curlft.add_time = ktime_get_real_seconds(); 2368 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0); 2369 xfrm_sk_policy_link(pol, dir); 2370 } 2371 rcu_assign_pointer(sk->sk_policy[dir], pol); 2372 if (old_pol) { 2373 if (pol) 2374 xfrm_policy_requeue(old_pol, pol); 2375 2376 /* Unlinking succeeds always. This is the only function 2377 * allowed to delete or replace socket policy. 
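 *
 * In sketch form, the replace path above is:
 *
 *	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
 *	rcu_assign_pointer(sk->sk_policy[dir], pol);
 *	xfrm_sk_policy_unlink(old_pol, dir);	// cannot fail
 *	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
 *	xfrm_policy_kill(old_pol);		// only after the lock is dropped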
2378 */ 2379 xfrm_sk_policy_unlink(old_pol, dir); 2380 } 2381 spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 2382 2383 if (old_pol) { 2384 xfrm_policy_kill(old_pol); 2385 } 2386 return 0; 2387 } 2388 2389 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir) 2390 { 2391 struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC); 2392 struct net *net = xp_net(old); 2393 2394 if (newp) { 2395 newp->selector = old->selector; 2396 if (security_xfrm_policy_clone(old->security, 2397 &newp->security)) { 2398 kfree(newp); 2399 return NULL; /* ENOMEM */ 2400 } 2401 newp->lft = old->lft; 2402 newp->curlft = old->curlft; 2403 newp->mark = old->mark; 2404 newp->if_id = old->if_id; 2405 newp->action = old->action; 2406 newp->flags = old->flags; 2407 newp->xfrm_nr = old->xfrm_nr; 2408 newp->index = old->index; 2409 newp->type = old->type; 2410 newp->family = old->family; 2411 memcpy(newp->xfrm_vec, old->xfrm_vec, 2412 newp->xfrm_nr*sizeof(struct xfrm_tmpl)); 2413 spin_lock_bh(&net->xfrm.xfrm_policy_lock); 2414 xfrm_sk_policy_link(newp, dir); 2415 spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 2416 xfrm_pol_put(newp); 2417 } 2418 return newp; 2419 } 2420 2421 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) 2422 { 2423 const struct xfrm_policy *p; 2424 struct xfrm_policy *np; 2425 int i, ret = 0; 2426 2427 rcu_read_lock(); 2428 for (i = 0; i < 2; i++) { 2429 p = rcu_dereference(osk->sk_policy[i]); 2430 if (p) { 2431 np = clone_policy(p, i); 2432 if (unlikely(!np)) { 2433 ret = -ENOMEM; 2434 break; 2435 } 2436 rcu_assign_pointer(sk->sk_policy[i], np); 2437 } 2438 } 2439 rcu_read_unlock(); 2440 return ret; 2441 } 2442 2443 static int 2444 xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local, 2445 xfrm_address_t *remote, unsigned short family, u32 mark) 2446 { 2447 int err; 2448 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 2449 2450 if (unlikely(afinfo == NULL)) 2451 return -EINVAL; 2452 err = afinfo->get_saddr(net, oif, local, remote, mark); 2453 rcu_read_unlock(); 2454 return err; 2455 } 2456 2457 /* Resolve list of templates for the flow, given policy. 
*/ 2458 2459 static int 2460 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl, 2461 struct xfrm_state **xfrm, unsigned short family) 2462 { 2463 struct net *net = xp_net(policy); 2464 int nx; 2465 int i, error; 2466 xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family); 2467 xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family); 2468 xfrm_address_t tmp; 2469 2470 for (nx = 0, i = 0; i < policy->xfrm_nr; i++) { 2471 struct xfrm_state *x; 2472 xfrm_address_t *remote = daddr; 2473 xfrm_address_t *local = saddr; 2474 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i]; 2475 2476 if (tmpl->mode == XFRM_MODE_TUNNEL || 2477 tmpl->mode == XFRM_MODE_BEET) { 2478 remote = &tmpl->id.daddr; 2479 local = &tmpl->saddr; 2480 if (xfrm_addr_any(local, tmpl->encap_family)) { 2481 error = xfrm_get_saddr(net, fl->flowi_oif, 2482 &tmp, remote, 2483 tmpl->encap_family, 0); 2484 if (error) 2485 goto fail; 2486 local = &tmp; 2487 } 2488 } 2489 2490 x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, 2491 family, policy->if_id); 2492 if (x && x->dir && x->dir != XFRM_SA_DIR_OUT) { 2493 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEDIRERROR); 2494 xfrm_state_put(x); 2495 error = -EINVAL; 2496 goto fail; 2497 } 2498 2499 if (x && x->km.state == XFRM_STATE_VALID) { 2500 xfrm[nx++] = x; 2501 daddr = remote; 2502 saddr = local; 2503 continue; 2504 } 2505 if (x) { 2506 error = (x->km.state == XFRM_STATE_ERROR ? 2507 -EINVAL : -EAGAIN); 2508 xfrm_state_put(x); 2509 } else if (error == -ESRCH) { 2510 error = -EAGAIN; 2511 } 2512 2513 if (!tmpl->optional) 2514 goto fail; 2515 } 2516 return nx; 2517 2518 fail: 2519 for (nx--; nx >= 0; nx--) 2520 xfrm_state_put(xfrm[nx]); 2521 return error; 2522 } 2523 2524 static int 2525 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl, 2526 struct xfrm_state **xfrm, unsigned short family) 2527 { 2528 struct xfrm_state *tp[XFRM_MAX_DEPTH]; 2529 struct xfrm_state **tpp = (npols > 1) ? 
tp : xfrm; 2530 int cnx = 0; 2531 int error; 2532 int ret; 2533 int i; 2534 2535 for (i = 0; i < npols; i++) { 2536 if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) { 2537 error = -ENOBUFS; 2538 goto fail; 2539 } 2540 2541 ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family); 2542 if (ret < 0) { 2543 error = ret; 2544 goto fail; 2545 } else 2546 cnx += ret; 2547 } 2548 2549 /* found states are sorted for outbound processing */ 2550 if (npols > 1) 2551 xfrm_state_sort(xfrm, tpp, cnx, family); 2552 2553 return cnx; 2554 2555 fail: 2556 for (cnx--; cnx >= 0; cnx--) 2557 xfrm_state_put(tpp[cnx]); 2558 return error; 2559 2560 } 2561 2562 static int xfrm_get_tos(const struct flowi *fl, int family) 2563 { 2564 if (family == AF_INET) 2565 return IPTOS_RT_MASK & fl->u.ip4.flowi4_tos; 2566 2567 return 0; 2568 } 2569 2570 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) 2571 { 2572 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 2573 struct dst_ops *dst_ops; 2574 struct xfrm_dst *xdst; 2575 2576 if (!afinfo) 2577 return ERR_PTR(-EINVAL); 2578 2579 switch (family) { 2580 case AF_INET: 2581 dst_ops = &net->xfrm.xfrm4_dst_ops; 2582 break; 2583 #if IS_ENABLED(CONFIG_IPV6) 2584 case AF_INET6: 2585 dst_ops = &net->xfrm.xfrm6_dst_ops; 2586 break; 2587 #endif 2588 default: 2589 BUG(); 2590 } 2591 xdst = dst_alloc(dst_ops, NULL, DST_OBSOLETE_NONE, 0); 2592 2593 if (likely(xdst)) { 2594 memset_after(xdst, 0, u.dst); 2595 } else 2596 xdst = ERR_PTR(-ENOBUFS); 2597 2598 rcu_read_unlock(); 2599 2600 return xdst; 2601 } 2602 2603 static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst, 2604 int nfheader_len) 2605 { 2606 if (dst->ops->family == AF_INET6) { 2607 path->path_cookie = rt6_get_cookie(dst_rt6_info(dst)); 2608 path->u.rt6.rt6i_nfheader_len = nfheader_len; 2609 } 2610 } 2611 2612 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, 2613 const struct flowi *fl) 2614 { 2615 const struct xfrm_policy_afinfo *afinfo = 2616 xfrm_policy_get_afinfo(xdst->u.dst.ops->family); 2617 int err; 2618 2619 if (!afinfo) 2620 return -EINVAL; 2621 2622 err = afinfo->fill_dst(xdst, dev, fl); 2623 2624 rcu_read_unlock(); 2625 2626 return err; 2627 } 2628 2629 2630 /* Allocate a chain of dst_entries, attach the known xfrms, and calculate 2631 * all the metrics... In short, bundle a bundle.
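 *
 * The result, in sketch form, is a chain of xfrm_dsts, one per state,
 * with the original route at the tail and the path cached at the head:
 *
 *	xdst0 --child--> xdst1 --child--> ... --child--> dst (route)
 *
 * where xdst0->path == dst and bundle[i]->u.dst.xfrm == xfrm[i].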
2632 */ 2633 2634 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, 2635 struct xfrm_state **xfrm, 2636 struct xfrm_dst **bundle, 2637 int nx, 2638 const struct flowi *fl, 2639 struct dst_entry *dst) 2640 { 2641 const struct xfrm_state_afinfo *afinfo; 2642 const struct xfrm_mode *inner_mode; 2643 struct net *net = xp_net(policy); 2644 unsigned long now = jiffies; 2645 struct net_device *dev; 2646 struct xfrm_dst *xdst_prev = NULL; 2647 struct xfrm_dst *xdst0 = NULL; 2648 int i = 0; 2649 int err; 2650 int header_len = 0; 2651 int nfheader_len = 0; 2652 int trailer_len = 0; 2653 int tos; 2654 int family = policy->selector.family; 2655 xfrm_address_t saddr, daddr; 2656 2657 xfrm_flowi_addr_get(fl, &saddr, &daddr, family); 2658 2659 tos = xfrm_get_tos(fl, family); 2660 2661 dst_hold(dst); 2662 2663 for (; i < nx; i++) { 2664 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family); 2665 struct dst_entry *dst1 = &xdst->u.dst; 2666 2667 err = PTR_ERR(xdst); 2668 if (IS_ERR(xdst)) { 2669 dst_release(dst); 2670 goto put_states; 2671 } 2672 2673 bundle[i] = xdst; 2674 if (!xdst_prev) 2675 xdst0 = xdst; 2676 else 2677 /* Ref count is taken during xfrm_alloc_dst() 2678 * No need to do dst_clone() on dst1 2679 */ 2680 xfrm_dst_set_child(xdst_prev, &xdst->u.dst); 2681 2682 if (xfrm[i]->sel.family == AF_UNSPEC) { 2683 inner_mode = xfrm_ip2inner_mode(xfrm[i], 2684 xfrm_af2proto(family)); 2685 if (!inner_mode) { 2686 err = -EAFNOSUPPORT; 2687 dst_release(dst); 2688 goto put_states; 2689 } 2690 } else 2691 inner_mode = &xfrm[i]->inner_mode; 2692 2693 xdst->route = dst; 2694 dst_copy_metrics(dst1, dst); 2695 2696 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) { 2697 __u32 mark = 0; 2698 int oif; 2699 2700 if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m) 2701 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]); 2702 2703 if (xfrm[i]->xso.type != XFRM_DEV_OFFLOAD_PACKET) 2704 family = xfrm[i]->props.family; 2705 2706 oif = fl->flowi_oif ? 
: fl->flowi_l3mdev; 2707 dst = xfrm_dst_lookup(xfrm[i], tos, oif, 2708 &saddr, &daddr, family, mark); 2709 err = PTR_ERR(dst); 2710 if (IS_ERR(dst)) 2711 goto put_states; 2712 } else 2713 dst_hold(dst); 2714 2715 dst1->xfrm = xfrm[i]; 2716 xdst->xfrm_genid = xfrm[i]->genid; 2717 2718 dst1->obsolete = DST_OBSOLETE_FORCE_CHK; 2719 dst1->lastuse = now; 2720 2721 dst1->input = dst_discard; 2722 2723 rcu_read_lock(); 2724 afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family); 2725 if (likely(afinfo)) 2726 dst1->output = afinfo->output; 2727 else 2728 dst1->output = dst_discard_out; 2729 rcu_read_unlock(); 2730 2731 xdst_prev = xdst; 2732 2733 header_len += xfrm[i]->props.header_len; 2734 if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT) 2735 nfheader_len += xfrm[i]->props.header_len; 2736 trailer_len += xfrm[i]->props.trailer_len; 2737 } 2738 2739 xfrm_dst_set_child(xdst_prev, dst); 2740 xdst0->path = dst; 2741 2742 err = -ENODEV; 2743 dev = dst->dev; 2744 if (!dev) 2745 goto free_dst; 2746 2747 xfrm_init_path(xdst0, dst, nfheader_len); 2748 xfrm_init_pmtu(bundle, nx); 2749 2750 for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst; 2751 xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) { 2752 err = xfrm_fill_dst(xdst_prev, dev, fl); 2753 if (err) 2754 goto free_dst; 2755 2756 xdst_prev->u.dst.header_len = header_len; 2757 xdst_prev->u.dst.trailer_len = trailer_len; 2758 header_len -= xdst_prev->u.dst.xfrm->props.header_len; 2759 trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len; 2760 } 2761 2762 return &xdst0->u.dst; 2763 2764 put_states: 2765 for (; i < nx; i++) 2766 xfrm_state_put(xfrm[i]); 2767 free_dst: 2768 if (xdst0) 2769 dst_release_immediate(&xdst0->u.dst); 2770 2771 return ERR_PTR(err); 2772 } 2773 2774 static int xfrm_expand_policies(const struct flowi *fl, u16 family, 2775 struct xfrm_policy **pols, 2776 int *num_pols, int *num_xfrms) 2777 { 2778 int i; 2779 2780 if (*num_pols == 0 || !pols[0]) { 2781 *num_pols = 0; 2782 *num_xfrms = 0; 2783 return 0; 2784 } 2785 if (IS_ERR(pols[0])) { 2786 *num_pols = 0; 2787 return PTR_ERR(pols[0]); 2788 } 2789 2790 *num_xfrms = pols[0]->xfrm_nr; 2791 2792 #ifdef CONFIG_XFRM_SUB_POLICY 2793 if (pols[0]->action == XFRM_POLICY_ALLOW && 2794 pols[0]->type != XFRM_POLICY_TYPE_MAIN) { 2795 pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]), 2796 XFRM_POLICY_TYPE_MAIN, 2797 fl, family, 2798 XFRM_POLICY_OUT, 2799 pols[0]->if_id); 2800 if (pols[1]) { 2801 if (IS_ERR(pols[1])) { 2802 xfrm_pols_put(pols, *num_pols); 2803 *num_pols = 0; 2804 return PTR_ERR(pols[1]); 2805 } 2806 (*num_pols)++; 2807 (*num_xfrms) += pols[1]->xfrm_nr; 2808 } 2809 } 2810 #endif 2811 for (i = 0; i < *num_pols; i++) { 2812 if (pols[i]->action != XFRM_POLICY_ALLOW) { 2813 *num_xfrms = -1; 2814 break; 2815 } 2816 } 2817 2818 return 0; 2819 2820 } 2821 2822 static struct xfrm_dst * 2823 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, 2824 const struct flowi *fl, u16 family, 2825 struct dst_entry *dst_orig) 2826 { 2827 struct net *net = xp_net(pols[0]); 2828 struct xfrm_state *xfrm[XFRM_MAX_DEPTH]; 2829 struct xfrm_dst *bundle[XFRM_MAX_DEPTH]; 2830 struct xfrm_dst *xdst; 2831 struct dst_entry *dst; 2832 int err; 2833 2834 /* Try to instantiate a bundle */ 2835 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family); 2836 if (err <= 0) { 2837 if (err == 0) 2838 return NULL; 2839 2840 if (err != -EAGAIN) 2841 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); 2842 return ERR_PTR(err); 2843 } 2844 2845 dst = 
xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig); 2846 if (IS_ERR(dst)) { 2847 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR); 2848 return ERR_CAST(dst); 2849 } 2850 2851 xdst = (struct xfrm_dst *)dst; 2852 xdst->num_xfrms = err; 2853 xdst->num_pols = num_pols; 2854 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols); 2855 xdst->policy_genid = atomic_read(&pols[0]->genid); 2856 2857 return xdst; 2858 } 2859 2860 static void xfrm_policy_queue_process(struct timer_list *t) 2861 { 2862 struct sk_buff *skb; 2863 struct sock *sk; 2864 struct dst_entry *dst; 2865 struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer); 2866 struct net *net = xp_net(pol); 2867 struct xfrm_policy_queue *pq = &pol->polq; 2868 struct flowi fl; 2869 struct sk_buff_head list; 2870 __u32 skb_mark; 2871 2872 spin_lock(&pq->hold_queue.lock); 2873 skb = skb_peek(&pq->hold_queue); 2874 if (!skb) { 2875 spin_unlock(&pq->hold_queue.lock); 2876 goto out; 2877 } 2878 dst = skb_dst(skb); 2879 sk = skb->sk; 2880 2881 /* Fixup the mark to support VTI. */ 2882 skb_mark = skb->mark; 2883 skb->mark = pol->mark.v; 2884 xfrm_decode_session(net, skb, &fl, dst->ops->family); 2885 skb->mark = skb_mark; 2886 spin_unlock(&pq->hold_queue.lock); 2887 2888 dst_hold(xfrm_dst_path(dst)); 2889 dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE); 2890 if (IS_ERR(dst)) 2891 goto purge_queue; 2892 2893 if (dst->flags & DST_XFRM_QUEUE) { 2894 dst_release(dst); 2895 2896 if (pq->timeout >= XFRM_QUEUE_TMO_MAX) 2897 goto purge_queue; 2898 2899 pq->timeout = pq->timeout << 1; 2900 if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout)) 2901 xfrm_pol_hold(pol); 2902 goto out; 2903 } 2904 2905 dst_release(dst); 2906 2907 __skb_queue_head_init(&list); 2908 2909 spin_lock(&pq->hold_queue.lock); 2910 pq->timeout = 0; 2911 skb_queue_splice_init(&pq->hold_queue, &list); 2912 spin_unlock(&pq->hold_queue.lock); 2913 2914 while (!skb_queue_empty(&list)) { 2915 skb = __skb_dequeue(&list); 2916 2917 /* Fixup the mark to support VTI. 
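 * The policy mark is swapped in only for the duration of the flow
 * decode so that mark-keyed (VTI) policies still match; in sketch
 * form, each dequeued skb below goes through:
 *
 *	skb_mark = skb->mark;
 *	skb->mark = pol->mark.v;	// match the VTI policy's mark
 *	xfrm_decode_session(net, skb, &fl, family);
 *	skb->mark = skb_mark;		// restore the original mark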
*/ 2918 skb_mark = skb->mark; 2919 skb->mark = pol->mark.v; 2920 xfrm_decode_session(net, skb, &fl, skb_dst(skb)->ops->family); 2921 skb->mark = skb_mark; 2922 2923 dst_hold(xfrm_dst_path(skb_dst(skb))); 2924 dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0); 2925 if (IS_ERR(dst)) { 2926 kfree_skb(skb); 2927 continue; 2928 } 2929 2930 nf_reset_ct(skb); 2931 skb_dst_drop(skb); 2932 skb_dst_set(skb, dst); 2933 2934 dst_output(net, skb->sk, skb); 2935 } 2936 2937 out: 2938 xfrm_pol_put(pol); 2939 return; 2940 2941 purge_queue: 2942 pq->timeout = 0; 2943 skb_queue_purge(&pq->hold_queue); 2944 xfrm_pol_put(pol); 2945 } 2946 2947 static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb) 2948 { 2949 unsigned long sched_next; 2950 struct dst_entry *dst = skb_dst(skb); 2951 struct xfrm_dst *xdst = (struct xfrm_dst *) dst; 2952 struct xfrm_policy *pol = xdst->pols[0]; 2953 struct xfrm_policy_queue *pq = &pol->polq; 2954 2955 if (unlikely(skb_fclone_busy(sk, skb))) { 2956 kfree_skb(skb); 2957 return 0; 2958 } 2959 2960 if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) { 2961 kfree_skb(skb); 2962 return -EAGAIN; 2963 } 2964 2965 skb_dst_force(skb); 2966 2967 spin_lock_bh(&pq->hold_queue.lock); 2968 2969 if (!pq->timeout) 2970 pq->timeout = XFRM_QUEUE_TMO_MIN; 2971 2972 sched_next = jiffies + pq->timeout; 2973 2974 if (del_timer(&pq->hold_timer)) { 2975 if (time_before(pq->hold_timer.expires, sched_next)) 2976 sched_next = pq->hold_timer.expires; 2977 xfrm_pol_put(pol); 2978 } 2979 2980 __skb_queue_tail(&pq->hold_queue, skb); 2981 if (!mod_timer(&pq->hold_timer, sched_next)) 2982 xfrm_pol_hold(pol); 2983 2984 spin_unlock_bh(&pq->hold_queue.lock); 2985 2986 return 0; 2987 } 2988 2989 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net, 2990 struct xfrm_flo *xflo, 2991 const struct flowi *fl, 2992 int num_xfrms, 2993 u16 family) 2994 { 2995 int err; 2996 struct net_device *dev; 2997 struct dst_entry *dst; 2998 struct dst_entry *dst1; 2999 struct xfrm_dst *xdst; 3000 3001 xdst = xfrm_alloc_dst(net, family); 3002 if (IS_ERR(xdst)) 3003 return xdst; 3004 3005 if (!(xflo->flags & XFRM_LOOKUP_QUEUE) || 3006 net->xfrm.sysctl_larval_drop || 3007 num_xfrms <= 0) 3008 return xdst; 3009 3010 dst = xflo->dst_orig; 3011 dst1 = &xdst->u.dst; 3012 dst_hold(dst); 3013 xdst->route = dst; 3014 3015 dst_copy_metrics(dst1, dst); 3016 3017 dst1->obsolete = DST_OBSOLETE_FORCE_CHK; 3018 dst1->flags |= DST_XFRM_QUEUE; 3019 dst1->lastuse = jiffies; 3020 3021 dst1->input = dst_discard; 3022 dst1->output = xdst_queue_output; 3023 3024 dst_hold(dst); 3025 xfrm_dst_set_child(xdst, dst); 3026 xdst->path = dst; 3027 3028 xfrm_init_path((struct xfrm_dst *)dst1, dst, 0); 3029 3030 err = -ENODEV; 3031 dev = dst->dev; 3032 if (!dev) 3033 goto free_dst; 3034 3035 err = xfrm_fill_dst(xdst, dev, fl); 3036 if (err) 3037 goto free_dst; 3038 3039 out: 3040 return xdst; 3041 3042 free_dst: 3043 dst_release(dst1); 3044 xdst = ERR_PTR(err); 3045 goto out; 3046 } 3047 3048 static struct xfrm_dst *xfrm_bundle_lookup(struct net *net, 3049 const struct flowi *fl, 3050 u16 family, u8 dir, 3051 struct xfrm_flo *xflo, u32 if_id) 3052 { 3053 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 3054 int num_pols = 0, num_xfrms = 0, err; 3055 struct xfrm_dst *xdst; 3056 3057 /* Resolve policies to use if we couldn't get them from 3058 * previous cache entry */ 3059 num_pols = 1; 3060 pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id); 3061 err = xfrm_expand_policies(fl, family, pols, 3062 &num_pols, 
&num_xfrms); 3063 if (err < 0) 3064 goto inc_error; 3065 if (num_pols == 0) 3066 return NULL; 3067 if (num_xfrms <= 0) 3068 goto make_dummy_bundle; 3069 3070 xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, 3071 xflo->dst_orig); 3072 if (IS_ERR(xdst)) { 3073 err = PTR_ERR(xdst); 3074 if (err == -EREMOTE) { 3075 xfrm_pols_put(pols, num_pols); 3076 return NULL; 3077 } 3078 3079 if (err != -EAGAIN) 3080 goto error; 3081 goto make_dummy_bundle; 3082 } else if (xdst == NULL) { 3083 num_xfrms = 0; 3084 goto make_dummy_bundle; 3085 } 3086 3087 return xdst; 3088 3089 make_dummy_bundle: 3090 /* We found policies, but there are no bundles to instantiate: 3091 * either because the policy blocks, has no transformations, or 3092 * we could not build a template (no xfrm_states). */ 3093 xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family); 3094 if (IS_ERR(xdst)) { 3095 xfrm_pols_put(pols, num_pols); 3096 return ERR_CAST(xdst); 3097 } 3098 xdst->num_pols = num_pols; 3099 xdst->num_xfrms = num_xfrms; 3100 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols); 3101 3102 return xdst; 3103 3104 inc_error: 3105 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); 3106 error: 3107 xfrm_pols_put(pols, num_pols); 3108 return ERR_PTR(err); 3109 } 3110 3111 static struct dst_entry *make_blackhole(struct net *net, u16 family, 3112 struct dst_entry *dst_orig) 3113 { 3114 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 3115 struct dst_entry *ret; 3116 3117 if (!afinfo) { 3118 dst_release(dst_orig); 3119 return ERR_PTR(-EINVAL); 3120 } else { 3121 ret = afinfo->blackhole_route(net, dst_orig); 3122 } 3123 rcu_read_unlock(); 3124 3125 return ret; 3126 } 3127 3128 /* Finds/creates a bundle for a given flow and if_id 3129 * 3130 * At the moment we eat a raw IP route. Mostly to speed up lookups 3131 * on interfaces with disabled IPsec. 3132 * 3133 * xfrm_lookup uses an if_id of 0 by default, and is provided for 3134 * compatibility. 3135 */ 3136 struct dst_entry *xfrm_lookup_with_ifid(struct net *net, 3137 struct dst_entry *dst_orig, 3138 const struct flowi *fl, 3139 const struct sock *sk, 3140 int flags, u32 if_id) 3141 { 3142 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 3143 struct xfrm_dst *xdst; 3144 struct dst_entry *dst, *route; 3145 u16 family = dst_orig->ops->family; 3146 u8 dir = XFRM_POLICY_OUT; 3147 int i, err, num_pols, num_xfrms = 0, drop_pols = 0; 3148 3149 dst = NULL; 3150 xdst = NULL; 3151 route = NULL; 3152 3153 sk = sk_const_to_full_sk(sk); 3154 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { 3155 num_pols = 1; 3156 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family, 3157 if_id); 3158 err = xfrm_expand_policies(fl, family, pols, 3159 &num_pols, &num_xfrms); 3160 if (err < 0) 3161 goto dropdst; 3162 3163 if (num_pols) { 3164 if (num_xfrms <= 0) { 3165 drop_pols = num_pols; 3166 goto no_transform; 3167 } 3168 3169 xdst = xfrm_resolve_and_create_bundle( 3170 pols, num_pols, fl, 3171 family, dst_orig); 3172 3173 if (IS_ERR(xdst)) { 3174 xfrm_pols_put(pols, num_pols); 3175 err = PTR_ERR(xdst); 3176 if (err == -EREMOTE) 3177 goto nopol; 3178 3179 goto dropdst; 3180 } else if (xdst == NULL) { 3181 num_xfrms = 0; 3182 drop_pols = num_pols; 3183 goto no_transform; 3184 } 3185 3186 route = xdst->route; 3187 } 3188 } 3189 3190 if (xdst == NULL) { 3191 struct xfrm_flo xflo; 3192 3193 xflo.dst_orig = dst_orig; 3194 xflo.flags = flags; 3195 3196 /* To accelerate a bit... 
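 * If the route is explicitly flagged DST_NOXFRM, or this netns has no
 * outbound policies installed at all, the bundle lookup below can be
 * skipped; a sketch of this fast path (only taken for if_id 0):
 *
 *	if (!if_id && ((dst_orig->flags & DST_NOXFRM) ||
 *		       !net->xfrm.policy_count[XFRM_POLICY_OUT]))
 *		goto nopol;	// plain routing, nothing to transform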
*/ 3197 if (!if_id && ((dst_orig->flags & DST_NOXFRM) || 3198 !net->xfrm.policy_count[XFRM_POLICY_OUT])) 3199 goto nopol; 3200 3201 xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id); 3202 if (xdst == NULL) 3203 goto nopol; 3204 if (IS_ERR(xdst)) { 3205 err = PTR_ERR(xdst); 3206 goto dropdst; 3207 } 3208 3209 num_pols = xdst->num_pols; 3210 num_xfrms = xdst->num_xfrms; 3211 memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols); 3212 route = xdst->route; 3213 } 3214 3215 dst = &xdst->u.dst; 3216 if (route == NULL && num_xfrms > 0) { 3217 /* The only case when xfrm_bundle_lookup() returns a 3218 * bundle with a null route is when the template could 3219 * not be resolved. It means policies are there, but the 3220 * bundle could not be created, since we don't yet 3221 * have the xfrm_states. We need to wait for KM to 3222 * negotiate new SAs or bail out with an error. */ 3223 if (net->xfrm.sysctl_larval_drop) { 3224 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); 3225 err = -EREMOTE; 3226 goto error; 3227 } 3228 3229 err = -EAGAIN; 3230 3231 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); 3232 goto error; 3233 } 3234 3235 no_transform: 3236 if (num_pols == 0) 3237 goto nopol; 3238 3239 if ((flags & XFRM_LOOKUP_ICMP) && 3240 !(pols[0]->flags & XFRM_POLICY_ICMP)) { 3241 err = -ENOENT; 3242 goto error; 3243 } 3244 3245 for (i = 0; i < num_pols; i++) 3246 WRITE_ONCE(pols[i]->curlft.use_time, ktime_get_real_seconds()); 3247 3248 if (num_xfrms < 0) { 3249 /* Prohibit the flow */ 3250 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK); 3251 err = -EPERM; 3252 goto error; 3253 } else if (num_xfrms > 0) { 3254 /* Flow transformed */ 3255 dst_release(dst_orig); 3256 } else { 3257 /* Flow passes untransformed */ 3258 dst_release(dst); 3259 dst = dst_orig; 3260 } 3261 ok: 3262 xfrm_pols_put(pols, drop_pols); 3263 if (dst && dst->xfrm && 3264 dst->xfrm->props.mode == XFRM_MODE_TUNNEL) 3265 dst->flags |= DST_XFRM_TUNNEL; 3266 return dst; 3267 3268 nopol: 3269 if ((!dst_orig->dev || !(dst_orig->dev->flags & IFF_LOOPBACK)) && 3270 net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) { 3271 err = -EPERM; 3272 goto error; 3273 } 3274 if (!(flags & XFRM_LOOKUP_ICMP)) { 3275 dst = dst_orig; 3276 goto ok; 3277 } 3278 err = -ENOENT; 3279 error: 3280 dst_release(dst); 3281 dropdst: 3282 if (!(flags & XFRM_LOOKUP_KEEP_DST_REF)) 3283 dst_release(dst_orig); 3284 xfrm_pols_put(pols, drop_pols); 3285 return ERR_PTR(err); 3286 } 3287 EXPORT_SYMBOL(xfrm_lookup_with_ifid); 3288 3289 /* Main function: finds/creates a bundle for a given flow. 3290 * 3291 * At the moment we eat a raw IP route. Mostly to speed up lookups 3292 * on interfaces with disabled IPsec. 3293 */ 3294 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, 3295 const struct flowi *fl, const struct sock *sk, 3296 int flags) 3297 { 3298 return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0); 3299 } 3300 EXPORT_SYMBOL(xfrm_lookup); 3301 3302 /* Callers of xfrm_lookup_route() must ensure a call to dst_output(). 3303 * Otherwise we may send out blackholed packets. 
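 *
 * A typical calling pattern, in sketch form (error handling elided):
 *
 *	dst = xfrm_lookup_route(net, dst, fl, sk, 0);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);
 *	skb_dst_set(skb, dst);
 *	...
 *	dst_output(net, sk, skb);	// a blackhole dst drops the skb here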
3304 */ 3305 struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, 3306 const struct flowi *fl, 3307 const struct sock *sk, int flags) 3308 { 3309 struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk, 3310 flags | XFRM_LOOKUP_QUEUE | 3311 XFRM_LOOKUP_KEEP_DST_REF); 3312 3313 if (PTR_ERR(dst) == -EREMOTE) 3314 return make_blackhole(net, dst_orig->ops->family, dst_orig); 3315 3316 if (IS_ERR(dst)) 3317 dst_release(dst_orig); 3318 3319 return dst; 3320 } 3321 EXPORT_SYMBOL(xfrm_lookup_route); 3322 3323 static inline int 3324 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl) 3325 { 3326 struct sec_path *sp = skb_sec_path(skb); 3327 struct xfrm_state *x; 3328 3329 if (!sp || idx < 0 || idx >= sp->len) 3330 return 0; 3331 x = sp->xvec[idx]; 3332 if (!x->type->reject) 3333 return 0; 3334 return x->type->reject(x, skb, fl); 3335 } 3336 3337 /* When skb is transformed back to its "native" form, we have to 3338 * check policy restrictions. At the moment we do this in a maximally 3339 * stupid way. Shame on me. :-) Of course, connected sockets must 3340 * have policy cached at them. 3341 */ 3342 3343 static inline int 3344 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, 3345 unsigned short family, u32 if_id) 3346 { 3347 if (xfrm_state_kern(x)) 3348 return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family); 3349 return x->id.proto == tmpl->id.proto && 3350 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) && 3351 (x->props.reqid == tmpl->reqid || !tmpl->reqid) && 3352 x->props.mode == tmpl->mode && 3353 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) || 3354 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) && 3355 !(x->props.mode != XFRM_MODE_TRANSPORT && 3356 xfrm_state_addr_cmp(tmpl, x, family)) && 3357 (if_id == 0 || if_id == x->if_id); 3358 } 3359 3360 /* 3361 * 0 or more than 0 is returned when validation succeeds (either a bypass 3362 * because of an optional transport mode, or the next index of the matched 3363 * secpath state with the template). 3364 * -1 is returned when no matching template is found. 3365 * Otherwise "-2 - errored_index" is returned. 
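 *
 * A caller recovers the errored index like this (sketch, mirroring
 * __xfrm_policy_check() below):
 *
 *	k = xfrm_policy_ok(tmpl, sp, k, family, if_id);
 *	if (k < -1)
 *		xerr_idx = -(2 + k);	// decode "-2 - errored_index"
 *	if (k < 0)
 *		goto reject;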
3366 */ 3367 static inline int 3368 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start, 3369 unsigned short family, u32 if_id) 3370 { 3371 int idx = start; 3372 3373 if (tmpl->optional) { 3374 if (tmpl->mode == XFRM_MODE_TRANSPORT) 3375 return start; 3376 } else 3377 start = -1; 3378 for (; idx < sp->len; idx++) { 3379 if (xfrm_state_ok(tmpl, sp->xvec[idx], family, if_id)) 3380 return ++idx; 3381 if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) { 3382 if (idx < sp->verified_cnt) { 3383 /* Secpath entry previously verified, consider optional and 3384 * continue searching 3385 */ 3386 continue; 3387 } 3388 3389 if (start == -1) 3390 start = -2-idx; 3391 break; 3392 } 3393 } 3394 return start; 3395 } 3396 3397 static void 3398 decode_session4(const struct xfrm_flow_keys *flkeys, struct flowi *fl, bool reverse) 3399 { 3400 struct flowi4 *fl4 = &fl->u.ip4; 3401 3402 memset(fl4, 0, sizeof(struct flowi4)); 3403 3404 if (reverse) { 3405 fl4->saddr = flkeys->addrs.ipv4.dst; 3406 fl4->daddr = flkeys->addrs.ipv4.src; 3407 fl4->fl4_sport = flkeys->ports.dst; 3408 fl4->fl4_dport = flkeys->ports.src; 3409 } else { 3410 fl4->saddr = flkeys->addrs.ipv4.src; 3411 fl4->daddr = flkeys->addrs.ipv4.dst; 3412 fl4->fl4_sport = flkeys->ports.src; 3413 fl4->fl4_dport = flkeys->ports.dst; 3414 } 3415 3416 switch (flkeys->basic.ip_proto) { 3417 case IPPROTO_GRE: 3418 fl4->fl4_gre_key = flkeys->gre.keyid; 3419 break; 3420 case IPPROTO_ICMP: 3421 fl4->fl4_icmp_type = flkeys->icmp.type; 3422 fl4->fl4_icmp_code = flkeys->icmp.code; 3423 break; 3424 } 3425 3426 fl4->flowi4_proto = flkeys->basic.ip_proto; 3427 fl4->flowi4_tos = flkeys->ip.tos & ~INET_ECN_MASK; 3428 } 3429 3430 #if IS_ENABLED(CONFIG_IPV6) 3431 static void 3432 decode_session6(const struct xfrm_flow_keys *flkeys, struct flowi *fl, bool reverse) 3433 { 3434 struct flowi6 *fl6 = &fl->u.ip6; 3435 3436 memset(fl6, 0, sizeof(struct flowi6)); 3437 3438 if (reverse) { 3439 fl6->saddr = flkeys->addrs.ipv6.dst; 3440 fl6->daddr = flkeys->addrs.ipv6.src; 3441 fl6->fl6_sport = flkeys->ports.dst; 3442 fl6->fl6_dport = flkeys->ports.src; 3443 } else { 3444 fl6->saddr = flkeys->addrs.ipv6.src; 3445 fl6->daddr = flkeys->addrs.ipv6.dst; 3446 fl6->fl6_sport = flkeys->ports.src; 3447 fl6->fl6_dport = flkeys->ports.dst; 3448 } 3449 3450 switch (flkeys->basic.ip_proto) { 3451 case IPPROTO_GRE: 3452 fl6->fl6_gre_key = flkeys->gre.keyid; 3453 break; 3454 case IPPROTO_ICMPV6: 3455 fl6->fl6_icmp_type = flkeys->icmp.type; 3456 fl6->fl6_icmp_code = flkeys->icmp.code; 3457 break; 3458 } 3459 3460 fl6->flowi6_proto = flkeys->basic.ip_proto; 3461 } 3462 #endif 3463 3464 int __xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl, 3465 unsigned int family, int reverse) 3466 { 3467 struct xfrm_flow_keys flkeys; 3468 3469 memset(&flkeys, 0, sizeof(flkeys)); 3470 __skb_flow_dissect(net, skb, &xfrm_session_dissector, &flkeys, 3471 NULL, 0, 0, 0, FLOW_DISSECTOR_F_STOP_AT_ENCAP); 3472 3473 switch (family) { 3474 case AF_INET: 3475 decode_session4(&flkeys, fl, reverse); 3476 break; 3477 #if IS_ENABLED(CONFIG_IPV6) 3478 case AF_INET6: 3479 decode_session6(&flkeys, fl, reverse); 3480 break; 3481 #endif 3482 default: 3483 return -EAFNOSUPPORT; 3484 } 3485 3486 fl->flowi_mark = skb->mark; 3487 if (reverse) { 3488 fl->flowi_oif = skb->skb_iif; 3489 } else { 3490 int oif = 0; 3491 3492 if (skb_dst(skb) && skb_dst(skb)->dev) 3493 oif = skb_dst(skb)->dev->ifindex; 3494 3495 fl->flowi_oif = oif; 3496 } 3497 3498 return 
security_xfrm_decode_session(skb, &fl->flowi_secid); 3499 } 3500 EXPORT_SYMBOL(__xfrm_decode_session); 3501 3502 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp) 3503 { 3504 for (; k < sp->len; k++) { 3505 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) { 3506 *idxp = k; 3507 return 1; 3508 } 3509 } 3510 3511 return 0; 3512 } 3513 3514 static bool icmp_err_packet(const struct flowi *fl, unsigned short family) 3515 { 3516 const struct flowi4 *fl4 = &fl->u.ip4; 3517 3518 if (family == AF_INET && 3519 fl4->flowi4_proto == IPPROTO_ICMP && 3520 (fl4->fl4_icmp_type == ICMP_DEST_UNREACH || 3521 fl4->fl4_icmp_type == ICMP_TIME_EXCEEDED)) 3522 return true; 3523 3524 #if IS_ENABLED(CONFIG_IPV6) 3525 if (family == AF_INET6) { 3526 const struct flowi6 *fl6 = &fl->u.ip6; 3527 3528 if (fl6->flowi6_proto == IPPROTO_ICMPV6 && 3529 (fl6->fl6_icmp_type == ICMPV6_DEST_UNREACH || 3530 fl6->fl6_icmp_type == ICMPV6_PKT_TOOBIG || 3531 fl6->fl6_icmp_type == ICMPV6_TIME_EXCEED)) 3532 return true; 3533 } 3534 #endif 3535 return false; 3536 } 3537 3538 static bool xfrm_icmp_flow_decode(struct sk_buff *skb, unsigned short family, 3539 const struct flowi *fl, struct flowi *fl1) 3540 { 3541 bool ret = true; 3542 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); 3543 int hl = family == AF_INET ? (sizeof(struct iphdr) + sizeof(struct icmphdr)) : 3544 (sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr)); 3545 3546 if (!newskb) 3547 return true; 3548 3549 if (!pskb_pull(newskb, hl)) 3550 goto out; 3551 3552 skb_reset_network_header(newskb); 3553 3554 if (xfrm_decode_session_reverse(dev_net(skb->dev), newskb, fl1, family) < 0) 3555 goto out; 3556 3557 fl1->flowi_oif = fl->flowi_oif; 3558 fl1->flowi_mark = fl->flowi_mark; 3559 fl1->flowi_tos = fl->flowi_tos; 3560 nf_nat_decode_session(newskb, fl1, family); 3561 ret = false; 3562 3563 out: 3564 consume_skb(newskb); 3565 return ret; 3566 } 3567 3568 static bool xfrm_selector_inner_icmp_match(struct sk_buff *skb, unsigned short family, 3569 const struct xfrm_selector *sel, 3570 const struct flowi *fl) 3571 { 3572 bool ret = false; 3573 3574 if (icmp_err_packet(fl, family)) { 3575 struct flowi fl1; 3576 3577 if (xfrm_icmp_flow_decode(skb, family, fl, &fl1)) 3578 return ret; 3579 3580 ret = xfrm_selector_match(sel, &fl1, family); 3581 } 3582 3583 return ret; 3584 } 3585 3586 static inline struct 3587 xfrm_policy *xfrm_in_fwd_icmp(struct sk_buff *skb, 3588 const struct flowi *fl, unsigned short family, 3589 u32 if_id) 3590 { 3591 struct xfrm_policy *pol = NULL; 3592 3593 if (icmp_err_packet(fl, family)) { 3594 struct flowi fl1; 3595 struct net *net = dev_net(skb->dev); 3596 3597 if (xfrm_icmp_flow_decode(skb, family, fl, &fl1)) 3598 return pol; 3599 3600 pol = xfrm_policy_lookup(net, &fl1, family, XFRM_POLICY_FWD, if_id); 3601 if (IS_ERR(pol)) 3602 pol = NULL; 3603 } 3604 3605 return pol; 3606 } 3607 3608 static inline struct 3609 dst_entry *xfrm_out_fwd_icmp(struct sk_buff *skb, struct flowi *fl, 3610 unsigned short family, struct dst_entry *dst) 3611 { 3612 if (icmp_err_packet(fl, family)) { 3613 struct net *net = dev_net(skb->dev); 3614 struct dst_entry *dst2; 3615 struct flowi fl1; 3616 3617 if (xfrm_icmp_flow_decode(skb, family, fl, &fl1)) 3618 return dst; 3619 3620 dst_hold(dst); 3621 3622 dst2 = xfrm_lookup(net, dst, &fl1, NULL, (XFRM_LOOKUP_QUEUE | XFRM_LOOKUP_ICMP)); 3623 3624 if (IS_ERR(dst2)) 3625 return dst; 3626 3627 if (dst2->xfrm) { 3628 dst_release(dst); 3629 dst = dst2; 3630 } else { 3631 dst_release(dst2); 3632 } 3633 } 
3634 3635 return dst; 3636 } 3637 3638 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, 3639 unsigned short family) 3640 { 3641 struct net *net = dev_net(skb->dev); 3642 struct xfrm_policy *pol; 3643 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 3644 int npols = 0; 3645 int xfrm_nr; 3646 int pi; 3647 int reverse; 3648 struct flowi fl; 3649 int xerr_idx = -1; 3650 const struct xfrm_if_cb *ifcb; 3651 struct sec_path *sp; 3652 u32 if_id = 0; 3653 3654 rcu_read_lock(); 3655 ifcb = xfrm_if_get_cb(); 3656 3657 if (ifcb) { 3658 struct xfrm_if_decode_session_result r; 3659 3660 if (ifcb->decode_session(skb, family, &r)) { 3661 if_id = r.if_id; 3662 net = r.net; 3663 } 3664 } 3665 rcu_read_unlock(); 3666 3667 reverse = dir & ~XFRM_POLICY_MASK; 3668 dir &= XFRM_POLICY_MASK; 3669 3670 if (__xfrm_decode_session(net, skb, &fl, family, reverse) < 0) { 3671 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR); 3672 return 0; 3673 } 3674 3675 nf_nat_decode_session(skb, &fl, family); 3676 3677 /* First, check used SA against their selectors. */ 3678 sp = skb_sec_path(skb); 3679 if (sp) { 3680 int i; 3681 3682 for (i = sp->len - 1; i >= 0; i--) { 3683 struct xfrm_state *x = sp->xvec[i]; 3684 int ret = 0; 3685 3686 if (!xfrm_selector_match(&x->sel, &fl, family)) { 3687 ret = 1; 3688 if (x->props.flags & XFRM_STATE_ICMP && 3689 xfrm_selector_inner_icmp_match(skb, family, &x->sel, &fl)) 3690 ret = 0; 3691 if (ret) { 3692 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH); 3693 return 0; 3694 } 3695 } 3696 } 3697 } 3698 3699 pol = NULL; 3700 sk = sk_to_full_sk(sk); 3701 if (sk && sk->sk_policy[dir]) { 3702 pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id); 3703 if (IS_ERR(pol)) { 3704 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 3705 return 0; 3706 } 3707 } 3708 3709 if (!pol) 3710 pol = xfrm_policy_lookup(net, &fl, family, dir, if_id); 3711 3712 if (IS_ERR(pol)) { 3713 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 3714 return 0; 3715 } 3716 3717 if (!pol && dir == XFRM_POLICY_FWD) 3718 pol = xfrm_in_fwd_icmp(skb, &fl, family, if_id); 3719 3720 if (!pol) { 3721 if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) { 3722 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS); 3723 return 0; 3724 } 3725 3726 if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) { 3727 xfrm_secpath_reject(xerr_idx, skb, &fl); 3728 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS); 3729 return 0; 3730 } 3731 return 1; 3732 } 3733 3734 /* This lockless write can happen from different cpus. */ 3735 WRITE_ONCE(pol->curlft.use_time, ktime_get_real_seconds()); 3736 3737 pols[0] = pol; 3738 npols++; 3739 #ifdef CONFIG_XFRM_SUB_POLICY 3740 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) { 3741 pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, 3742 &fl, family, 3743 XFRM_POLICY_IN, if_id); 3744 if (pols[1]) { 3745 if (IS_ERR(pols[1])) { 3746 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 3747 xfrm_pol_put(pols[0]); 3748 return 0; 3749 } 3750 /* This write can happen from different cpus. 
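 * WRITE_ONCE() keeps this racy timestamp update well-defined for any
 * lockless reader of use_time; in sketch form:
 *
 *	WRITE_ONCE(pol->curlft.use_time, ktime_get_real_seconds());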
*/ 3751 WRITE_ONCE(pols[1]->curlft.use_time, 3752 ktime_get_real_seconds()); 3753 npols++; 3754 } 3755 } 3756 #endif 3757 3758 if (pol->action == XFRM_POLICY_ALLOW) { 3759 static struct sec_path dummy; 3760 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH]; 3761 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH]; 3762 struct xfrm_tmpl **tpp = tp; 3763 int ti = 0; 3764 int i, k; 3765 3766 sp = skb_sec_path(skb); 3767 if (!sp) 3768 sp = &dummy; 3769 3770 for (pi = 0; pi < npols; pi++) { 3771 if (pols[pi] != pol && 3772 pols[pi]->action != XFRM_POLICY_ALLOW) { 3773 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK); 3774 goto reject; 3775 } 3776 if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) { 3777 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR); 3778 goto reject_error; 3779 } 3780 for (i = 0; i < pols[pi]->xfrm_nr; i++) 3781 tpp[ti++] = &pols[pi]->xfrm_vec[i]; 3782 } 3783 xfrm_nr = ti; 3784 3785 if (npols > 1) { 3786 xfrm_tmpl_sort(stp, tpp, xfrm_nr, family); 3787 tpp = stp; 3788 } 3789 3790 /* For each tunnel xfrm, find the first matching tmpl. 3791 * For each tmpl before that, find corresponding xfrm. 3792 * Order is _important_. Later we will implement 3793 * some barriers, but at the moment barriers 3794 * are implied between each two transformations. 3795 * Upon success, marks secpath entries as having been 3796 * verified to allow them to be skipped in future policy 3797 * checks (e.g. nested tunnels). 3798 */ 3799 for (i = xfrm_nr-1, k = 0; i >= 0; i--) { 3800 k = xfrm_policy_ok(tpp[i], sp, k, family, if_id); 3801 if (k < 0) { 3802 if (k < -1) 3803 /* "-2 - errored_index" returned */ 3804 xerr_idx = -(2+k); 3805 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH); 3806 goto reject; 3807 } 3808 } 3809 3810 if (secpath_has_nontransport(sp, k, &xerr_idx)) { 3811 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH); 3812 goto reject; 3813 } 3814 3815 xfrm_pols_put(pols, npols); 3816 sp->verified_cnt = k; 3817 3818 return 1; 3819 } 3820 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK); 3821 3822 reject: 3823 xfrm_secpath_reject(xerr_idx, skb, &fl); 3824 reject_error: 3825 xfrm_pols_put(pols, npols); 3826 return 0; 3827 } 3828 EXPORT_SYMBOL(__xfrm_policy_check); 3829 3830 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family) 3831 { 3832 struct net *net = dev_net(skb->dev); 3833 struct flowi fl; 3834 struct dst_entry *dst; 3835 int res = 1; 3836 3837 if (xfrm_decode_session(net, skb, &fl, family) < 0) { 3838 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR); 3839 return 0; 3840 } 3841 3842 skb_dst_force(skb); 3843 if (!skb_dst(skb)) { 3844 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR); 3845 return 0; 3846 } 3847 3848 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE); 3849 if (IS_ERR(dst)) { 3850 res = 0; 3851 dst = NULL; 3852 } 3853 3854 if (dst && !dst->xfrm) 3855 dst = xfrm_out_fwd_icmp(skb, &fl, family, dst); 3856 3857 skb_dst_set(skb, dst); 3858 return res; 3859 } 3860 EXPORT_SYMBOL(__xfrm_route_forward); 3861 3862 /* Optimize later using cookies and generation ids. */ 3863 3864 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie) 3865 { 3866 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete 3867 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to 3868 * get validated by dst_ops->check on every use. We do this 3869 * because when a normal route referenced by an XFRM dst is 3870 * obsoleted we do not go looking around for all parent 3871 * referencing XFRM dsts so that we can invalidate them. It 3872 * is just too much work. 
Instead we make the checks here on 3873 * every use. For example: 3874 * 3875 * XFRM dst A --> IPv4 dst X 3876 * 3877 * X is the "xdst->route" of A (X is also the "dst->path" of A 3878 * in this example). If X is marked obsolete, "A" will not 3879 * notice. That's what we are validating here via the 3880 * stale_bundle() check. 3881 * 3882 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will 3883 * be set on it. 3884 * This will force stale_bundle() to fail on any xdst bundle with 3885 * this dst linked in it. 3886 */ 3887 if (dst->obsolete < 0 && !stale_bundle(dst)) 3888 return dst; 3889 3890 return NULL; 3891 } 3892 3893 static int stale_bundle(struct dst_entry *dst) 3894 { 3895 return !xfrm_bundle_ok((struct xfrm_dst *)dst); 3896 } 3897 3898 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev) 3899 { 3900 while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) { 3901 dst->dev = blackhole_netdev; 3902 dev_hold(dst->dev); 3903 dev_put(dev); 3904 } 3905 } 3906 EXPORT_SYMBOL(xfrm_dst_ifdown); 3907 3908 static void xfrm_link_failure(struct sk_buff *skb) 3909 { 3910 /* Impossible. Such dst must be popped before it reaches the point of failure. */ 3911 } 3912 3913 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst) 3914 { 3915 if (dst) { 3916 if (dst->obsolete) { 3917 dst_release(dst); 3918 dst = NULL; 3919 } 3920 } 3921 return dst; 3922 } 3923 3924 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr) 3925 { 3926 while (nr--) { 3927 struct xfrm_dst *xdst = bundle[nr]; 3928 u32 pmtu, route_mtu_cached; 3929 struct dst_entry *dst; 3930 3931 dst = &xdst->u.dst; 3932 pmtu = dst_mtu(xfrm_dst_child(dst)); 3933 xdst->child_mtu_cached = pmtu; 3934 3935 pmtu = xfrm_state_mtu(dst->xfrm, pmtu); 3936 3937 route_mtu_cached = dst_mtu(xdst->route); 3938 xdst->route_mtu_cached = route_mtu_cached; 3939 3940 if (pmtu > route_mtu_cached) 3941 pmtu = route_mtu_cached; 3942 3943 dst_metric_set(dst, RTAX_MTU, pmtu); 3944 } 3945 } 3946 3947 /* Check that the bundle accepts the flow and its components are 3948 * still valid. 
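 *
 * Returns 1 if the bundle can still be used and 0 if it is stale;
 * this is what backs the dst_ops->check path above, in sketch form:
 *
 *	if (dst->obsolete < 0 && !stale_bundle(dst))
 *		return dst;	// bundle still valid
 *	return NULL;		// force the caller to relookup the flow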
3949 */ 3950 3951 static int xfrm_bundle_ok(struct xfrm_dst *first) 3952 { 3953 struct xfrm_dst *bundle[XFRM_MAX_DEPTH]; 3954 struct dst_entry *dst = &first->u.dst; 3955 struct xfrm_dst *xdst; 3956 int start_from, nr; 3957 u32 mtu; 3958 3959 if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) || 3960 (dst->dev && !netif_running(dst->dev))) 3961 return 0; 3962 3963 if (dst->flags & DST_XFRM_QUEUE) 3964 return 1; 3965 3966 start_from = nr = 0; 3967 do { 3968 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 3969 3970 if (dst->xfrm->km.state != XFRM_STATE_VALID) 3971 return 0; 3972 if (xdst->xfrm_genid != dst->xfrm->genid) 3973 return 0; 3974 if (xdst->num_pols > 0 && 3975 xdst->policy_genid != atomic_read(&xdst->pols[0]->genid)) 3976 return 0; 3977 3978 bundle[nr++] = xdst; 3979 3980 mtu = dst_mtu(xfrm_dst_child(dst)); 3981 if (xdst->child_mtu_cached != mtu) { 3982 start_from = nr; 3983 xdst->child_mtu_cached = mtu; 3984 } 3985 3986 if (!dst_check(xdst->route, xdst->route_cookie)) 3987 return 0; 3988 mtu = dst_mtu(xdst->route); 3989 if (xdst->route_mtu_cached != mtu) { 3990 start_from = nr; 3991 xdst->route_mtu_cached = mtu; 3992 } 3993 3994 dst = xfrm_dst_child(dst); 3995 } while (dst->xfrm); 3996 3997 if (likely(!start_from)) 3998 return 1; 3999 4000 xdst = bundle[start_from - 1]; 4001 mtu = xdst->child_mtu_cached; 4002 while (start_from--) { 4003 dst = &xdst->u.dst; 4004 4005 mtu = xfrm_state_mtu(dst->xfrm, mtu); 4006 if (mtu > xdst->route_mtu_cached) 4007 mtu = xdst->route_mtu_cached; 4008 dst_metric_set(dst, RTAX_MTU, mtu); 4009 if (!start_from) 4010 break; 4011 4012 xdst = bundle[start_from - 1]; 4013 xdst->child_mtu_cached = mtu; 4014 } 4015 4016 return 1; 4017 } 4018 4019 static unsigned int xfrm_default_advmss(const struct dst_entry *dst) 4020 { 4021 return dst_metric_advmss(xfrm_dst_path(dst)); 4022 } 4023 4024 static unsigned int xfrm_mtu(const struct dst_entry *dst) 4025 { 4026 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); 4027 4028 return mtu ? 
: dst_mtu(xfrm_dst_path(dst)); 4029 } 4030 4031 static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst, 4032 const void *daddr) 4033 { 4034 while (dst->xfrm) { 4035 const struct xfrm_state *xfrm = dst->xfrm; 4036 4037 dst = xfrm_dst_child(dst); 4038 4039 if (xfrm->props.mode == XFRM_MODE_TRANSPORT) 4040 continue; 4041 if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR) 4042 daddr = xfrm->coaddr; 4043 else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR)) 4044 daddr = &xfrm->id.daddr; 4045 } 4046 return daddr; 4047 } 4048 4049 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, 4050 struct sk_buff *skb, 4051 const void *daddr) 4052 { 4053 const struct dst_entry *path = xfrm_dst_path(dst); 4054 4055 if (!skb) 4056 daddr = xfrm_get_dst_nexthop(dst, daddr); 4057 return path->ops->neigh_lookup(path, skb, daddr); 4058 } 4059 4060 static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr) 4061 { 4062 const struct dst_entry *path = xfrm_dst_path(dst); 4063 4064 daddr = xfrm_get_dst_nexthop(dst, daddr); 4065 path->ops->confirm_neigh(path, daddr); 4066 } 4067 4068 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family) 4069 { 4070 int err = 0; 4071 4072 if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo))) 4073 return -EAFNOSUPPORT; 4074 4075 spin_lock(&xfrm_policy_afinfo_lock); 4076 if (unlikely(xfrm_policy_afinfo[family] != NULL)) 4077 err = -EEXIST; 4078 else { 4079 struct dst_ops *dst_ops = afinfo->dst_ops; 4080 if (likely(dst_ops->kmem_cachep == NULL)) 4081 dst_ops->kmem_cachep = xfrm_dst_cache; 4082 if (likely(dst_ops->check == NULL)) 4083 dst_ops->check = xfrm_dst_check; 4084 if (likely(dst_ops->default_advmss == NULL)) 4085 dst_ops->default_advmss = xfrm_default_advmss; 4086 if (likely(dst_ops->mtu == NULL)) 4087 dst_ops->mtu = xfrm_mtu; 4088 if (likely(dst_ops->negative_advice == NULL)) 4089 dst_ops->negative_advice = xfrm_negative_advice; 4090 if (likely(dst_ops->link_failure == NULL)) 4091 dst_ops->link_failure = xfrm_link_failure; 4092 if (likely(dst_ops->neigh_lookup == NULL)) 4093 dst_ops->neigh_lookup = xfrm_neigh_lookup; 4094 if (likely(!dst_ops->confirm_neigh)) 4095 dst_ops->confirm_neigh = xfrm_confirm_neigh; 4096 rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo); 4097 } 4098 spin_unlock(&xfrm_policy_afinfo_lock); 4099 4100 return err; 4101 } 4102 EXPORT_SYMBOL(xfrm_policy_register_afinfo); 4103 4104 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo) 4105 { 4106 struct dst_ops *dst_ops = afinfo->dst_ops; 4107 int i; 4108 4109 for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) { 4110 if (xfrm_policy_afinfo[i] != afinfo) 4111 continue; 4112 RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL); 4113 break; 4114 } 4115 4116 synchronize_rcu(); 4117 4118 dst_ops->kmem_cachep = NULL; 4119 dst_ops->check = NULL; 4120 dst_ops->negative_advice = NULL; 4121 dst_ops->link_failure = NULL; 4122 } 4123 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); 4124 4125 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb) 4126 { 4127 spin_lock(&xfrm_if_cb_lock); 4128 rcu_assign_pointer(xfrm_if_cb, ifcb); 4129 spin_unlock(&xfrm_if_cb_lock); 4130 } 4131 EXPORT_SYMBOL(xfrm_if_register_cb); 4132 4133 void xfrm_if_unregister_cb(void) 4134 { 4135 RCU_INIT_POINTER(xfrm_if_cb, NULL); 4136 synchronize_rcu(); 4137 } 4138 EXPORT_SYMBOL(xfrm_if_unregister_cb); 4139 4140 #ifdef CONFIG_XFRM_STATISTICS 4141 static int __net_init xfrm_statistics_init(struct net *net) 4142 { 4143 int rv; 4144 net->mib.xfrm_statistics = 
alloc_percpu(struct linux_xfrm_mib); 4145 if (!net->mib.xfrm_statistics) 4146 return -ENOMEM; 4147 rv = xfrm_proc_init(net); 4148 if (rv < 0) 4149 free_percpu(net->mib.xfrm_statistics); 4150 return rv; 4151 } 4152 4153 static void xfrm_statistics_fini(struct net *net) 4154 { 4155 xfrm_proc_fini(net); 4156 free_percpu(net->mib.xfrm_statistics); 4157 } 4158 #else 4159 static int __net_init xfrm_statistics_init(struct net *net) 4160 { 4161 return 0; 4162 } 4163 4164 static void xfrm_statistics_fini(struct net *net) 4165 { 4166 } 4167 #endif 4168 4169 static int __net_init xfrm_policy_init(struct net *net) 4170 { 4171 unsigned int hmask, sz; 4172 int dir, err; 4173 4174 if (net_eq(net, &init_net)) { 4175 xfrm_dst_cache = KMEM_CACHE(xfrm_dst, SLAB_HWCACHE_ALIGN | SLAB_PANIC); 4176 err = rhashtable_init(&xfrm_policy_inexact_table, 4177 &xfrm_pol_inexact_params); 4178 BUG_ON(err); 4179 } 4180 4181 hmask = 8 - 1; 4182 sz = (hmask+1) * sizeof(struct hlist_head); 4183 4184 net->xfrm.policy_byidx = xfrm_hash_alloc(sz); 4185 if (!net->xfrm.policy_byidx) 4186 goto out_byidx; 4187 net->xfrm.policy_idx_hmask = hmask; 4188 4189 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { 4190 struct xfrm_policy_hash *htab; 4191 4192 net->xfrm.policy_count[dir] = 0; 4193 net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0; 4194 INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]); 4195 4196 htab = &net->xfrm.policy_bydst[dir]; 4197 htab->table = xfrm_hash_alloc(sz); 4198 if (!htab->table) 4199 goto out_bydst; 4200 htab->hmask = hmask; 4201 htab->dbits4 = 32; 4202 htab->sbits4 = 32; 4203 htab->dbits6 = 128; 4204 htab->sbits6 = 128; 4205 } 4206 net->xfrm.policy_hthresh.lbits4 = 32; 4207 net->xfrm.policy_hthresh.rbits4 = 32; 4208 net->xfrm.policy_hthresh.lbits6 = 128; 4209 net->xfrm.policy_hthresh.rbits6 = 128; 4210 4211 seqlock_init(&net->xfrm.policy_hthresh.lock); 4212 4213 INIT_LIST_HEAD(&net->xfrm.policy_all); 4214 INIT_LIST_HEAD(&net->xfrm.inexact_bins); 4215 INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize); 4216 INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild); 4217 return 0; 4218 4219 out_bydst: 4220 for (dir--; dir >= 0; dir--) { 4221 struct xfrm_policy_hash *htab; 4222 4223 htab = &net->xfrm.policy_bydst[dir]; 4224 xfrm_hash_free(htab->table, sz); 4225 } 4226 xfrm_hash_free(net->xfrm.policy_byidx, sz); 4227 out_byidx: 4228 return -ENOMEM; 4229 } 4230 4231 static void xfrm_policy_fini(struct net *net) 4232 { 4233 struct xfrm_pol_inexact_bin *b, *t; 4234 unsigned int sz; 4235 int dir; 4236 4237 flush_work(&net->xfrm.policy_hash_work); 4238 #ifdef CONFIG_XFRM_SUB_POLICY 4239 xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false); 4240 #endif 4241 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false); 4242 4243 WARN_ON(!list_empty(&net->xfrm.policy_all)); 4244 4245 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { 4246 struct xfrm_policy_hash *htab; 4247 4248 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir])); 4249 4250 htab = &net->xfrm.policy_bydst[dir]; 4251 sz = (htab->hmask + 1) * sizeof(struct hlist_head); 4252 WARN_ON(!hlist_empty(htab->table)); 4253 xfrm_hash_free(htab->table, sz); 4254 } 4255 4256 sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head); 4257 WARN_ON(!hlist_empty(net->xfrm.policy_byidx)); 4258 xfrm_hash_free(net->xfrm.policy_byidx, sz); 4259 4260 spin_lock_bh(&net->xfrm.xfrm_policy_lock); 4261 list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins) 4262 __xfrm_policy_inexact_prune_bin(b, true); 4263 spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 4264 } 4265 4266 
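/* Per-netns bring-up below unwinds in reverse order on failure; in
 * sketch form (mirroring the error labels in xfrm_net_init()):
 *
 *	statistics -> state -> policy -> sysctl		(init order)
 *	sysctl -> policy -> state -> statistics		(error/exit order)
 */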

static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	/* Initialize the per-net locks here */
	spin_lock_init(&net->xfrm.xfrm_state_lock);
	spin_lock_init(&net->xfrm.xfrm_policy_lock);
	seqcount_spinlock_init(&net->xfrm.xfrm_policy_hash_generation,
			       &net->xfrm.xfrm_policy_lock);
	mutex_init(&net->xfrm.xfrm_cfg_mutex);
	net->xfrm.policy_default[XFRM_POLICY_IN] = XFRM_USERPOLICY_ACCEPT;
	net->xfrm.policy_default[XFRM_POLICY_FWD] = XFRM_USERPOLICY_ACCEPT;
	net->xfrm.policy_default[XFRM_POLICY_OUT] = XFRM_USERPOLICY_ACCEPT;

	rv = xfrm_statistics_init(net);
	if (rv < 0)
		goto out_statistics;
	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	rv = xfrm_sysctl_init(net);
	if (rv < 0)
		goto out_sysctl;

	return 0;

out_sysctl:
	xfrm_policy_fini(net);
out_policy:
	xfrm_state_fini(net);
out_state:
	xfrm_statistics_fini(net);
out_statistics:
	return rv;
}

static void __net_exit xfrm_net_exit(struct net *net)
{
	xfrm_sysctl_fini(net);
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
	xfrm_statistics_fini(net);
}

static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};

static const struct flow_dissector_key xfrm_flow_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct xfrm_flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct xfrm_flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct xfrm_flow_keys, addrs.ipv4),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct xfrm_flow_keys, addrs.ipv6),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct xfrm_flow_keys, ports),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
		.offset = offsetof(struct xfrm_flow_keys, gre),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IP,
		.offset = offsetof(struct xfrm_flow_keys, ip),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_ICMP,
		.offset = offsetof(struct xfrm_flow_keys, icmp),
	},
};

void __init xfrm_init(void)
{
	skb_flow_dissector_init(&xfrm_session_dissector,
				xfrm_flow_dissector_keys,
				ARRAY_SIZE(xfrm_flow_dissector_keys));

	register_pernet_subsys(&xfrm_net_ops);
	xfrm_dev_init();
	xfrm_input_init();

#ifdef CONFIG_XFRM_ESPINTCP
	espintcp_init();
#endif

	register_xfrm_state_bpf();
}
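
/*
 * The helpers below append policy information to an SPD audit record.
 * A resulting record looks roughly like this (illustrative; the exact
 * field set depends on the selector family, the prefix lengths and
 * whether a security context is attached):
 *
 *   op=SPD-add ... res=1 src=10.1.0.0 src_prefixlen=16 dst=10.2.0.0 dst_prefixlen=16
 */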

#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}

void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif

#ifdef CONFIG_XFRM_MIGRATE
static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
					const struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
				    sel_cmp->family) &&
		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
				    sel_cmp->family) &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return true;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return true;
		}
	}
	return false;
}

static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
						    u8 dir, u8 type, struct net *net, u32 if_id)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;
	u32 priority = ~0U;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, chain, bydst) {
		if ((if_id == 0 || pol->if_id == if_id) &&
		    xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		if ((pol->priority >= priority) && ret)
			break;

		if ((if_id == 0 || pol->if_id == if_id) &&
		    xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			break;
		}
	}

	xfrm_pol_hold(ret);

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	return ret;
}
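
/*
 * Check whether template @t is a target of migrate request @m: mode and
 * proto must match (and reqid too, if the request specifies one);
 * tunnel and BEET mode templates must additionally match on the old
 * endpoint addresses, since those are what the migration rewrites.
 */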
static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
					    m->old_family) &&
			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
					    m->old_family)) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* in case of transport mode, template does not store
			 * any IP addresses, hence we just compare mode and
			 * protocol
			 */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}

/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate,
			       struct netlink_ext_ack *extack)
{
	struct xfrm_migrate *mp;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		NL_SET_ERR_MSG(extack, "Target policy not found");
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			atomic_inc(&pol->genid);
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}

static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate,
			      struct netlink_ext_ack *extack)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH) {
		NL_SET_ERR_MSG(extack, "Invalid number of SAs to migrate, must be 0 < num <= XFRM_MAX_DEPTH (6)");
		return -EINVAL;
	}

	for (i = 0; i < num_migrate; i++) {
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family)) {
			NL_SET_ERR_MSG(extack, "Addresses in the MIGRATE attribute's list cannot be null");
			return -EINVAL;
		}

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family) {
				NL_SET_ERR_MSG(extack, "Entries in the MIGRATE attribute's list must be unique");
				return -EINVAL;
			}
		}
	}

	return 0;
}
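
/*
 * Entry point for SA migration, reached from the key managers
 * (XFRM_MSG_MIGRATE via netlink or the PF_KEY MIGRATE extension):
 * validate the request, find the matching policy, clone each matching
 * old state to the new endpoints, rewrite the policy templates, delete
 * the old states and finally announce the migration to the key
 * managers.
 */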
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k, struct net *net,
		 struct xfrm_encap_tmpl *encap, u32 if_id,
		 struct netlink_ext_ack *extack)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	/* Stage 0 - sanity checks */
	err = xfrm_migrate_check(m, num_migrate, extack);
	if (err < 0)
		goto out;

	if (dir >= XFRM_POLICY_MAX) {
		NL_SET_ERR_MSG(extack, "Invalid policy direction");
		err = -EINVAL;
		goto out;
	}

	/* Stage 1 - find policy */
	pol = xfrm_migrate_policy_find(sel, dir, type, net, if_id);
	if (!pol) {
		NL_SET_ERR_MSG(extack, "Target policy not found");
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		x = xfrm_migrate_state_find(mp, net, if_id);
		if (x) {
			x_cur[nx_cur] = x;
			nx_cur++;
			xc = xfrm_state_migrate(x, mp, encap);
			if (xc) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	err = xfrm_policy_migrate(pol, m, num_migrate, extack);
	if (err < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k, encap);

	xfrm_pol_put(pol);

	return 0;
out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif