// SPDX-License-Identifier: GPL-2.0-only
/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/cpu.h>
#include <linux/audit.h>
#include <linux/rhashtable.h>
#include <linux/if_tunnel.h>
#include <linux/icmp.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/gre.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/mip6.h>
#endif
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif
#ifdef CONFIG_XFRM_ESPINTCP
#include <net/espintcp.h>
#endif
#include <net/inet_dscp.h>

#include "xfrm_hash.h"

#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
#define XFRM_MAX_QUEUE_LEN	100

struct xfrm_flo {
	struct dst_entry *dst_orig;
	u8 flags;
};

/* prefixes smaller than this are stored in lists, not trees. */
#define INEXACT_PREFIXLEN_IPV4	16
#define INEXACT_PREFIXLEN_IPV6	48

struct xfrm_pol_inexact_node {
	struct rb_node node;
	union {
		xfrm_address_t addr;
		struct rcu_head rcu;
	};
	u8 prefixlen;

	struct rb_root root;

	/* the policies matching this node, can be empty list */
	struct hlist_head hhead;
};

/* xfrm inexact policy search tree:
 * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
 *  |
 * +---- root_d: sorted by daddr:prefix
 * |                 |
 * |        xfrm_pol_inexact_node
 * |                 |
 * |                 +- root: sorted by saddr/prefix
 * |                 |              |
 * |                 |         xfrm_pol_inexact_node
 * |                 |              |
 * |                 |              + root: unused
 * |                 |              |
 * |                 |              + hhead: saddr:daddr policies
 * |                 |
 * |                 +- coarse policies and all any:daddr policies
 * |
 * +---- root_s: sorted by saddr:prefix
 * |                 |
 * |        xfrm_pol_inexact_node
 * |                 |
 * |                 + root: unused
 * |                 |
 * |                 + hhead: saddr:any policies
 * |
 * +---- coarse policies and all any:any policies
 *
 * Lookups return four candidate lists:
 * 1. any:any list from top-level xfrm_pol_inexact_bin
 * 2. any:daddr list from daddr tree
 * 3. saddr:daddr list from 2nd level daddr tree
 * 4. saddr:any list from saddr tree
 *
 * This result set then needs to be searched for the policy with
 * the lowest priority.  If two results have same prio, youngest one wins.
 */
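/*
 * Illustrative example (comment only, not part of the code): given three
 * policies hashed into the same bin,
 *
 *	A: src 0.0.0.0/0      -> dst 0.0.0.0/0	(any:any, bin->hhead)
 *	B: src 0.0.0.0/0      -> dst 10.0.0.0/8	(any:daddr, root_d node list)
 *	C: src 192.168.0.0/16 -> dst 10.0.0.0/8	(saddr:daddr, 2nd level tree)
 *
 * a lookup for 192.168.1.1 -> 10.1.2.3 collects all three lists as
 * candidates; the winner is then picked by lowest priority value, with
 * ties broken via pol->pos.
 */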
struct xfrm_pol_inexact_key {
	possible_net_t net;
	u32 if_id;
	u16 family;
	u8 dir, type;
};

struct xfrm_pol_inexact_bin {
	struct xfrm_pol_inexact_key k;
	struct rhash_head head;
	/* list containing '*:*' policies */
	struct hlist_head hhead;

	seqcount_spinlock_t count;
	/* tree sorted by daddr/prefix */
	struct rb_root root_d;

	/* tree sorted by saddr/prefix */
	struct rb_root root_s;

	/* slow path below */
	struct list_head inexact_bins;
	struct rcu_head rcu;
};

enum xfrm_pol_inexact_candidate_type {
	XFRM_POL_CAND_BOTH,
	XFRM_POL_CAND_SADDR,
	XFRM_POL_CAND_DADDR,
	XFRM_POL_CAND_ANY,

	XFRM_POL_CAND_MAX,
};

struct xfrm_pol_inexact_candidates {
	struct hlist_head *res[XFRM_POL_CAND_MAX];
};

struct xfrm_flow_keys {
	struct flow_dissector_key_basic basic;
	struct flow_dissector_key_control control;
	union {
		struct flow_dissector_key_ipv4_addrs ipv4;
		struct flow_dissector_key_ipv6_addrs ipv6;
	} addrs;
	struct flow_dissector_key_ip ip;
	struct flow_dissector_key_icmp icmp;
	struct flow_dissector_key_ports ports;
	struct flow_dissector_key_keyid gre;
};

static struct flow_dissector xfrm_session_dissector __ro_after_init;

static DEFINE_SPINLOCK(xfrm_if_cb_lock);
static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;

static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
						__read_mostly;

static struct kmem_cache *xfrm_dst_cache __ro_after_init;

static struct rhashtable xfrm_policy_inexact_table;
static const struct rhashtable_params xfrm_pol_inexact_params;

static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static void xfrm_policy_queue_process(struct timer_list *t);

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
			   u32 if_id);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup_rcu(struct net *net,
			       u8 type, u16 family, u8 dir, u32 if_id);
static struct xfrm_policy *
xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
			bool excl);
static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy);

static bool
xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
				    struct xfrm_pol_inexact_bin *b,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr);

static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
{
	return refcount_inc_not_zero(&policy->refcnt);
}

static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}
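/*
 * Illustrative note (comment only): the port tests above are masked
 * compares, so e.g. sel->dport = htons(80) with sel->dport_mask =
 * htons(0xffff) matches only destination port 80, while a zero mask
 * makes the selector match any port.
 */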
static inline bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}

bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
			 unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return false;
}

static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	const struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
		return NULL;
	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

/* Called with rcu_read_lock(). */
static const struct xfrm_if_cb *xfrm_if_get_cb(void)
{
	return rcu_dereference(xfrm_if_cb);
}

struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr,
				    int family, u32 mark)
{
	const struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);

	rcu_read_unlock();

	return dst;
}
EXPORT_SYMBOL(__xfrm_dst_lookup);

static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
						int tos, int oif,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family, u32 mark)
{
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		saddr = x->coaddr;
		daddr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		saddr = prev_saddr;
		daddr = x->coaddr;
	}

	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
	}

	return dst;
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
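/*
 * Worked example (comment only): with hard_add_expires_seconds = 3600
 * and add_time taken at insertion, xfrm_policy_timer() below computes
 * tmo = add_time + 3600 - now; a non-positive tmo means the policy has
 * hard-expired and is deleted, otherwise the timer is re-armed with the
 * smallest pending timeout, clamped by make_jiffies() above so it never
 * exceeds MAX_SCHEDULE_TIMEOUT - 1.
 */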
static void xfrm_policy_timer(struct timer_list *t)
{
	struct xfrm_policy *xp = from_timer(xp, t, timer);
	time64_t now = ktime_get_real_seconds();
	time64_t next = TIME64_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (unlikely(xp->walk.dead))
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		time64_t tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		time64_t tmo = xp->lft.hard_use_expires_seconds +
			(READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		time64_t tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		time64_t tmo = xp->lft.soft_use_expires_seconds +
			(READ_ONCE(xp->curlft.use_time) ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != TIME64_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}

/* Allocate an xfrm_policy. Not used directly here; it is intended for
 * pfkeyv2 SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst_inexact_list);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		refcount_set(&policy->refcnt, 1);
		skb_queue_head_init(&policy->polq.hold_queue);
		timer_setup(&policy->timer, xfrm_policy_timer, 0);
		timer_setup(&policy->polq.hold_timer,
			    xfrm_policy_queue_process, 0);
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

static void xfrm_policy_destroy_rcu(struct rcu_head *head)
{
	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}

/* Destroy an xfrm_policy: all descendant resources must have been
 * released by this point.
 */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);

	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
		BUG();

	xfrm_dev_policy_free(policy);
	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
}
EXPORT_SYMBOL(xfrm_policy_destroy);
/* Rule must be locked. Release descendant resources and announce the
 * entry dead. The rule must already be unlinked from all lists at this
 * point.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	xfrm_dev_policy_delete(policy);

	write_lock_bh(&policy->lock);
	policy->walk.dead = 1;
	write_unlock_bh(&policy->lock);

	atomic_inc(&policy->genid);

	if (del_timer(&policy->polq.hold_timer))
		xfrm_pol_put(policy);
	skb_queue_purge(&policy->polq.hold_queue);

	if (del_timer(&policy->timer))
		xfrm_pol_put(policy);

	xfrm_pol_put(policy);
}

static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}

/* calculate policy hash thresholds */
static void __get_hash_thresh(struct net *net,
			      unsigned short family, int dir,
			      u8 *dbits, u8 *sbits)
{
	switch (family) {
	case AF_INET:
		*dbits = net->xfrm.policy_bydst[dir].dbits4;
		*sbits = net->xfrm.policy_bydst[dir].sbits4;
		break;

	case AF_INET6:
		*dbits = net->xfrm.policy_bydst[dir].dbits6;
		*sbits = net->xfrm.policy_bydst[dir].sbits6;
		break;

	default:
		*dbits = 0;
		*sbits = 0;
	}
}

static struct hlist_head *policy_hash_bysel(struct net *net,
					    const struct xfrm_selector *sel,
					    unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __sel_hash(sel, family, hmask, dbits, sbits);

	if (hash == hmask + 1)
		return NULL;

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}

static struct hlist_head *policy_hash_direct(struct net *net,
					     const xfrm_address_t *daddr,
					     const xfrm_address_t *saddr,
					     unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}
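/*
 * Illustrative note (comment only): with thresholds of dbits = 32 and
 * sbits = 32 (the usual IPv4 defaults), only selectors carrying full
 * /32 addresses hash into the bydst tables; for anything coarser,
 * __sel_hash() yields hmask + 1, policy_hash_bysel() above returns
 * NULL, and the policy falls back to the inexact lists/trees.
 */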
static void xfrm_dst_hash_transfer(struct net *net,
				   struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask,
				   int dir)
{
	struct hlist_node *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;
	u8 dbits;
	u8 sbits;

redo:
	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
		unsigned int h;

		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask, dbits, sbits);
		if (!entry0 || pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
			hlist_del_rcu(&pol->bydst);
			hlist_add_head_rcu(&pol->bydst, ndsttable + h);
			h0 = h;
		} else {
			if (h != h0)
				continue;
			hlist_del_rcu(&pol->bydst);
			hlist_add_behind_rcu(&pol->bydst, entry0);
		}
		entry0 = &pol->bydst;
	}
	if (!hlist_empty(list)) {
		entry0 = NULL;
		goto redo;
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}

static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	struct hlist_head *odst;
	int i;

	if (!ndst)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);

	odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
				lockdep_is_held(&net->xfrm.xfrm_policy_lock));

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);

	rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	synchronize_rcu();

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(struct net *net)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = net->xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
}
EXPORT_SYMBOL(xfrm_spd_getinfo);
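/*
 * Illustrative note (comment only): xfrm_new_hash_mask() doubles the
 * table on each resize, e.g. hmask 15 (16 buckets) becomes 31 (32
 * buckets).  The worker below grows a direction's bydst table once the
 * policy count exceeds the current mask, bounded by xfrm_policy_hashmax.
 */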
static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	}
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net);

	mutex_unlock(&hash_resize_mutex);
}

/* Make sure *pol can be inserted into fastbin.
 * Useful to check that later insert requests will be successful
 * (provided xfrm_policy_lock is held throughout).
 */
static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
{
	struct xfrm_pol_inexact_bin *bin, *prev;
	struct xfrm_pol_inexact_key k = {
		.family = pol->family,
		.type = pol->type,
		.dir = dir,
		.if_id = pol->if_id,
	};
	struct net *net = xp_net(pol);

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	write_pnet(&k.net, net);
	bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
				     xfrm_pol_inexact_params);
	if (bin)
		return bin;

	bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
	if (!bin)
		return NULL;

	bin->k = k;
	INIT_HLIST_HEAD(&bin->hhead);
	bin->root_d = RB_ROOT;
	bin->root_s = RB_ROOT;
	seqcount_spinlock_init(&bin->count, &net->xfrm.xfrm_policy_lock);

	prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
						&bin->k, &bin->head,
						xfrm_pol_inexact_params);
	if (!prev) {
		list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
		return bin;
	}

	kfree(bin);

	return IS_ERR(prev) ? NULL : prev;
}

static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
					       int family, u8 prefixlen)
{
	if (xfrm_addr_any(addr, family))
		return true;

	if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
		return true;

	if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
		return true;

	return false;
}

static bool
xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
{
	const xfrm_address_t *addr;
	bool saddr_any, daddr_any;
	u8 prefixlen;

	addr = &policy->selector.saddr;
	prefixlen = policy->selector.prefixlen_s;

	saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
						       policy->family,
						       prefixlen);
	addr = &policy->selector.daddr;
	prefixlen = policy->selector.prefixlen_d;
	daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
						       policy->family,
						       prefixlen);
	return saddr_any && daddr_any;
}

static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
				       const xfrm_address_t *addr, u8 prefixlen)
{
	node->addr = *addr;
	node->prefixlen = prefixlen;
}

static struct xfrm_pol_inexact_node *
xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
{
	struct xfrm_pol_inexact_node *node;

	node = kzalloc(sizeof(*node), GFP_ATOMIC);
	if (node)
		xfrm_pol_inexact_node_init(node, addr, prefixlen);

	return node;
}
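/*
 * Illustrative note (comment only): per
 * xfrm_pol_inexact_addr_use_any_list() above, a selector address like
 * 10.0.0.0/8 (IPv4 prefixlen 8 < INEXACT_PREFIXLEN_IPV4) stays on the
 * coarse list instead of getting a tree node; very short prefixes would
 * match almost everything and make the tree walk pointless.
 */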
static int xfrm_policy_addr_delta(const xfrm_address_t *a,
				  const xfrm_address_t *b,
				  u8 prefixlen, u16 family)
{
	u32 ma, mb, mask;
	unsigned int pdw, pbi;
	int delta = 0;

	switch (family) {
	case AF_INET:
		if (prefixlen == 0)
			return 0;
		mask = ~0U << (32 - prefixlen);
		ma = ntohl(a->a4) & mask;
		mb = ntohl(b->a4) & mask;
		if (ma < mb)
			delta = -1;
		else if (ma > mb)
			delta = 1;
		break;
	case AF_INET6:
		pdw = prefixlen >> 5;
		pbi = prefixlen & 0x1f;

		if (pdw) {
			delta = memcmp(a->a6, b->a6, pdw << 2);
			if (delta)
				return delta;
		}
		if (pbi) {
			mask = ~0U << (32 - pbi);
			ma = ntohl(a->a6[pdw]) & mask;
			mb = ntohl(b->a6[pdw]) & mask;
			if (ma < mb)
				delta = -1;
			else if (ma > mb)
				delta = 1;
		}
		break;
	default:
		break;
	}

	return delta;
}

static void xfrm_policy_inexact_list_reinsert(struct net *net,
					      struct xfrm_pol_inexact_node *n,
					      u16 family)
{
	unsigned int matched_s, matched_d;
	struct xfrm_policy *policy, *p;

	matched_s = 0;
	matched_d = 0;

	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		struct hlist_node *newpos = NULL;
		bool matches_s, matches_d;

		if (policy->walk.dead || !policy->bydst_reinsert)
			continue;

		WARN_ON_ONCE(policy->family != family);

		policy->bydst_reinsert = false;
		hlist_for_each_entry(p, &n->hhead, bydst) {
			if (policy->priority > p->priority)
				newpos = &p->bydst;
			else if (policy->priority == p->priority &&
				 policy->pos > p->pos)
				newpos = &p->bydst;
			else
				break;
		}

		if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
			hlist_add_behind_rcu(&policy->bydst, newpos);
		else
			hlist_add_head_rcu(&policy->bydst, &n->hhead);

		/* paranoia checks follow.
		 * Check that the reinserted policy matches at least
		 * saddr or daddr for current node prefix.
		 *
		 * Matching both is fine, matching saddr in one policy
		 * (but not daddr) and then matching only daddr in another
		 * is a bug.
		 */
		matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
						   &n->addr,
						   n->prefixlen,
						   family) == 0;
		matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
						   &n->addr,
						   n->prefixlen,
						   family) == 0;
		if (matches_s && matches_d)
			continue;

		WARN_ON_ONCE(!matches_s && !matches_d);
		if (matches_s)
			matched_s++;
		if (matches_d)
			matched_d++;
		WARN_ON_ONCE(matched_s && matched_d);
	}
}

static void xfrm_policy_inexact_node_reinsert(struct net *net,
					      struct xfrm_pol_inexact_node *n,
					      struct rb_root *new,
					      u16 family)
{
	struct xfrm_pol_inexact_node *node;
	struct rb_node **p, *parent;

	/* we should not have another subtree here */
	WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));
restart:
	parent = NULL;
	p = &new->rb_node;
	while (*p) {
		u8 prefixlen;
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		prefixlen = min(node->prefixlen, n->prefixlen);

		delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
					       prefixlen, family);
		if (delta < 0) {
			p = &parent->rb_left;
		} else if (delta > 0) {
			p = &parent->rb_right;
		} else {
			bool same_prefixlen = node->prefixlen == n->prefixlen;
			struct xfrm_policy *tmp;

			hlist_for_each_entry(tmp, &n->hhead, bydst) {
				tmp->bydst_reinsert = true;
				hlist_del_rcu(&tmp->bydst);
			}

			node->prefixlen = prefixlen;

			xfrm_policy_inexact_list_reinsert(net, node, family);

			if (same_prefixlen) {
				kfree_rcu(n, rcu);
				return;
			}

			rb_erase(*p, new);
			kfree_rcu(n, rcu);
			n = node;
			goto restart;
		}
	}

	rb_link_node_rcu(&n->node, parent, p);
	rb_insert_color(&n->node, new);
}
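/*
 * Illustrative scenario (comment only): when re-inserting a 10.0.0.0/8
 * node into a tree that already holds 10.1.0.0/16, the two addresses
 * compare equal under the shorter prefix, so the incoming node's
 * policies move to the existing node; if the prefix lengths differ, the
 * surviving node shrinks to the shorter prefix and is re-inserted,
 * which may trigger further merges.
 */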
/* merge nodes v and n */
static void xfrm_policy_inexact_node_merge(struct net *net,
					   struct xfrm_pol_inexact_node *v,
					   struct xfrm_pol_inexact_node *n,
					   u16 family)
{
	struct xfrm_pol_inexact_node *node;
	struct xfrm_policy *tmp;
	struct rb_node *rnode;

	/* To-be-merged node v has a subtree.
	 *
	 * Dismantle it and insert its nodes to n->root.
	 */
	while ((rnode = rb_first(&v->root)) != NULL) {
		node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
		rb_erase(&node->node, &v->root);
		xfrm_policy_inexact_node_reinsert(net, node, &n->root,
						  family);
	}

	hlist_for_each_entry(tmp, &v->hhead, bydst) {
		tmp->bydst_reinsert = true;
		hlist_del_rcu(&tmp->bydst);
	}

	xfrm_policy_inexact_list_reinsert(net, n, family);
}
static struct xfrm_pol_inexact_node *
xfrm_policy_inexact_insert_node(struct net *net,
				struct rb_root *root,
				xfrm_address_t *addr,
				u16 family, u8 prefixlen, u8 dir)
{
	struct xfrm_pol_inexact_node *cached = NULL;
	struct rb_node **p, *parent = NULL;
	struct xfrm_pol_inexact_node *node;

	p = &root->rb_node;
	while (*p) {
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		delta = xfrm_policy_addr_delta(addr, &node->addr,
					       node->prefixlen,
					       family);
		if (delta == 0 && prefixlen >= node->prefixlen) {
			WARN_ON_ONCE(cached); /* ipsec policies got lost */
			return node;
		}

		if (delta < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;

		if (prefixlen < node->prefixlen) {
			delta = xfrm_policy_addr_delta(addr, &node->addr,
						       prefixlen,
						       family);
			if (delta)
				continue;

			/* This node is a subnet of the new prefix. It needs
			 * to be removed and re-inserted with the smaller
			 * prefix and all nodes that are now also covered
			 * by the reduced prefixlen.
			 */
			rb_erase(&node->node, root);

			if (!cached) {
				xfrm_pol_inexact_node_init(node, addr,
							   prefixlen);
				cached = node;
			} else {
				/* This node also falls within the new
				 * prefixlen. Merge the to-be-reinserted
				 * node and this one.
				 */
				xfrm_policy_inexact_node_merge(net, node,
							       cached, family);
				kfree_rcu(node, rcu);
			}

			/* restart */
			p = &root->rb_node;
			parent = NULL;
		}
	}

	node = cached;
	if (!node) {
		node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
		if (!node)
			return NULL;
	}

	rb_link_node_rcu(&node->node, parent, p);
	rb_insert_color(&node->node, root);

	return node;
}

static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
{
	struct xfrm_pol_inexact_node *node;
	struct rb_node *rn = rb_first(r);

	while (rn) {
		node = rb_entry(rn, struct xfrm_pol_inexact_node, node);

		xfrm_policy_inexact_gc_tree(&node->root, rm);
		rn = rb_next(rn);

		if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
			WARN_ON_ONCE(rm);
			continue;
		}

		rb_erase(&node->node, r);
		kfree_rcu(node, rcu);
	}
}

static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
{
	write_seqcount_begin(&b->count);
	xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
	xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
	write_seqcount_end(&b->count);

	if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
	    !hlist_empty(&b->hhead)) {
		WARN_ON_ONCE(net_exit);
		return;
	}

	if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
				   xfrm_pol_inexact_params) == 0) {
		list_del(&b->inexact_bins);
		kfree_rcu(b, rcu);
	}
}

static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
{
	struct net *net = read_pnet(&b->k.net);

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	__xfrm_policy_inexact_prune_bin(b, false);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}

static void __xfrm_policy_inexact_flush(struct net *net)
{
	struct xfrm_pol_inexact_bin *bin, *t;

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
		__xfrm_policy_inexact_prune_bin(bin, false);
}
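/*
 * Illustrative note (comment only): per the pruning above, a bin is
 * only removed from the rhashtable once both prefix trees and its
 * '*:*' list are empty, so pruning after a failed or duplicate insert
 * is safe even while other policies still reference the bin.
 */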
static struct hlist_head *
xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
				struct xfrm_policy *policy, u8 dir)
{
	struct xfrm_pol_inexact_node *n;
	struct net *net;

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	if (xfrm_policy_inexact_insert_use_any_list(policy))
		return &bin->hhead;

	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
					       policy->family,
					       policy->selector.prefixlen_d)) {
		write_seqcount_begin(&bin->count);
		n = xfrm_policy_inexact_insert_node(net,
						    &bin->root_s,
						    &policy->selector.saddr,
						    policy->family,
						    policy->selector.prefixlen_s,
						    dir);
		write_seqcount_end(&bin->count);
		if (!n)
			return NULL;

		return &n->hhead;
	}

	/* daddr is fixed */
	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &bin->root_d,
					    &policy->selector.daddr,
					    policy->family,
					    policy->selector.prefixlen_d, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	/* saddr is wildcard */
	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
					       policy->family,
					       policy->selector.prefixlen_s))
		return &n->hhead;

	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &n->root,
					    &policy->selector.saddr,
					    policy->family,
					    policy->selector.prefixlen_s, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	return &n->hhead;
}

static struct xfrm_policy *
xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
{
	struct xfrm_pol_inexact_bin *bin;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct net *net;

	bin = xfrm_policy_inexact_alloc_bin(policy, dir);
	if (!bin)
		return ERR_PTR(-ENOMEM);

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
	if (!chain) {
		__xfrm_policy_inexact_prune_bin(bin, false);
		return ERR_PTR(-ENOMEM);
	}

	delpol = xfrm_policy_insert_list(chain, policy, excl);
	if (delpol && excl) {
		__xfrm_policy_inexact_prune_bin(bin, false);
		return ERR_PTR(-EEXIST);
	}

	chain = &net->xfrm.policy_inexact[dir];
	xfrm_policy_insert_inexact_list(chain, policy);

	if (delpol)
		__xfrm_policy_inexact_prune_bin(bin, false);

	return delpol;
}
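/*
 * Illustrative summary (comment only) of the chain selection in
 * xfrm_policy_inexact_alloc_chain() above:
 *
 *	saddr any, daddr any	-> bin->hhead
 *	saddr set, daddr any	-> node in bin->root_s
 *	saddr any, daddr set	-> node in bin->root_d
 *	saddr set, daddr set	-> node in the daddr node's subtree
 *
 * where "any" also covers prefixes shorter than the INEXACT_PREFIXLEN
 * thresholds.
 */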
static void xfrm_hash_rebuild(struct work_struct *work)
{
	struct net *net = container_of(work, struct net,
				       xfrm.policy_hthresh.work);
	unsigned int hmask;
	struct xfrm_policy *pol;
	struct xfrm_policy *policy;
	struct hlist_head *chain;
	struct hlist_head *odst;
	struct hlist_node *newpos;
	int i;
	int dir;
	unsigned seq;
	u8 lbits4, rbits4, lbits6, rbits6;

	mutex_lock(&hash_resize_mutex);

	/* read selector prefixlen thresholds */
	do {
		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);

		lbits4 = net->xfrm.policy_hthresh.lbits4;
		rbits4 = net->xfrm.policy_hthresh.rbits4;
		lbits6 = net->xfrm.policy_hthresh.lbits6;
		rbits6 = net->xfrm.policy_hthresh.rbits6;
	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation);

	/* make sure that we can insert the indirect policies again before
	 * we start with destructive action.
	 */
	list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
		struct xfrm_pol_inexact_bin *bin;
		u8 dbits, sbits;

		if (policy->walk.dead)
			continue;

		dir = xfrm_policy_id2dir(policy->index);
		if (dir >= XFRM_POLICY_MAX)
			continue;

		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			if (policy->family == AF_INET) {
				dbits = rbits4;
				sbits = lbits4;
			} else {
				dbits = rbits6;
				sbits = lbits6;
			}
		} else {
			if (policy->family == AF_INET) {
				dbits = lbits4;
				sbits = rbits4;
			} else {
				dbits = lbits6;
				sbits = rbits6;
			}
		}

		if (policy->selector.prefixlen_d < dbits ||
		    policy->selector.prefixlen_s < sbits)
			continue;

		bin = xfrm_policy_inexact_alloc_bin(policy, dir);
		if (!bin)
			goto out_unlock;

		if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
			goto out_unlock;
	}

	/* reset the bydst and inexact table in all directions */
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct hlist_node *n;

		hlist_for_each_entry_safe(policy, n,
					  &net->xfrm.policy_inexact[dir],
					  bydst_inexact_list) {
			hlist_del_rcu(&policy->bydst);
			hlist_del_init(&policy->bydst_inexact_list);
		}

		hmask = net->xfrm.policy_bydst[dir].hmask;
		odst = net->xfrm.policy_bydst[dir].table;
		for (i = hmask; i >= 0; i--) {
			hlist_for_each_entry_safe(policy, n, odst + i, bydst)
				hlist_del_rcu(&policy->bydst);
		}
		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			/* dir out => dst = remote, src = local */
			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
		} else {
			/* dir in/fwd => dst = local, src = remote */
			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
		}
	}

	/* re-insert all policies by order of creation */
	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		if (policy->walk.dead)
			continue;
		dir = xfrm_policy_id2dir(policy->index);
		if (dir >= XFRM_POLICY_MAX) {
			/* skip socket policies */
			continue;
		}
		newpos = NULL;
		chain = policy_hash_bysel(net, &policy->selector,
					  policy->family, dir);

		if (!chain) {
			void *p = xfrm_policy_inexact_insert(policy, dir, 0);

			WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
			continue;
		}

		hlist_for_each_entry(pol, chain, bydst) {
			if (policy->priority >= pol->priority)
				newpos = &pol->bydst;
			else
				break;
		}
		if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
			hlist_add_behind_rcu(&policy->bydst, newpos);
		else
			hlist_add_head_rcu(&policy->bydst, chain);
	}

out_unlock:
	__xfrm_policy_inexact_flush(net);
	write_seqcount_end(&net->xfrm.xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	mutex_unlock(&hash_resize_mutex);
}

void xfrm_policy_hash_rebuild(struct net *net)
{
	schedule_work(&net->xfrm.policy_hthresh.work);
}
EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
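/*
 * Illustrative note (comment only): the rebuild above walks
 * net->xfrm.policy_all in reverse, i.e. policies are re-inserted in
 * creation order, so the relative precedence of equal-priority policies
 * is preserved across a threshold change.
 */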
/* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute unpredictability of ordering of rules. This will not pass. */
static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
{
	for (;;) {
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		if (!index) {
			idx = (net->xfrm.idx_generator | dir);
			net->xfrm.idx_generator += 8;
		} else {
			idx = index;
			index = 0;
		}

		if (idx == 0)
			idx = 8;
		list = net->xfrm.policy_byidx + idx_hash(net, idx);
		found = 0;
		hlist_for_each_entry(p, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}

static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}

static void xfrm_policy_requeue(struct xfrm_policy *old,
				struct xfrm_policy *new)
{
	struct xfrm_policy_queue *pq = &old->polq;
	struct sk_buff_head list;

	if (skb_queue_empty(&pq->hold_queue))
		return;

	__skb_queue_head_init(&list);

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice_init(&pq->hold_queue, &list);
	if (del_timer(&pq->hold_timer))
		xfrm_pol_put(old);
	spin_unlock_bh(&pq->hold_queue.lock);

	pq = &new->polq;

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice(&list, &pq->hold_queue);
	pq->timeout = XFRM_QUEUE_TMO_MIN;
	if (!mod_timer(&pq->hold_timer, jiffies))
		xfrm_pol_hold(new);
	spin_unlock_bh(&pq->hold_queue.lock);
}

static inline bool xfrm_policy_mark_match(const struct xfrm_mark *mark,
					  struct xfrm_policy *pol)
{
	return mark->v == pol->mark.v && mark->m == pol->mark.m;
}

static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
{
	const struct xfrm_pol_inexact_key *k = data;
	u32 a = k->type << 24 | k->dir << 16 | k->family;

	return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
			    seed);
}

static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
{
	const struct xfrm_pol_inexact_bin *b = data;

	return xfrm_pol_bin_key(&b->k, 0, seed);
}

static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
			    const void *ptr)
{
	const struct xfrm_pol_inexact_key *key = arg->key;
	const struct xfrm_pol_inexact_bin *b = ptr;
	int ret;

	if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
		return -1;

	ret = b->k.dir ^ key->dir;
	if (ret)
		return ret;

	ret = b->k.type ^ key->type;
	if (ret)
		return ret;

	ret = b->k.family ^ key->family;
	if (ret)
		return ret;

	return b->k.if_id ^ key->if_id;
}

static const struct rhashtable_params xfrm_pol_inexact_params = {
	.head_offset		= offsetof(struct xfrm_pol_inexact_bin, head),
	.hashfn			= xfrm_pol_bin_key,
	.obj_hashfn		= xfrm_pol_bin_obj,
	.obj_cmpfn		= xfrm_pol_bin_cmp,
	.automatic_shrinking	= true,
};
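/*
 * Illustrative note (comment only): the rhashtable key above packs
 * type/dir/family into one word, e.g. type 0, dir XFRM_POLICY_OUT (1),
 * family AF_INET (2) gives a = 0x00010002, which is then jhashed
 * together with if_id and the netns hash.
 */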
static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy)
{
	struct xfrm_policy *pol, *delpol = NULL;
	struct hlist_node *newpos = NULL;
	int i = 0;

	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		if (pol->type == policy->type &&
		    pol->if_id == policy->if_id &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(&policy->mark, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst_inexact_list;
			continue;
		}
		if (delpol)
			break;
	}

	if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
		hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
	else
		hlist_add_head_rcu(&policy->bydst_inexact_list, chain);

	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		pol->pos = i;
		i++;
	}
}

static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
						   struct xfrm_policy *policy,
						   bool excl)
{
	struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;

	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == policy->type &&
		    pol->if_id == policy->if_id &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(&policy->mark, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl)
				return ERR_PTR(-EEXIST);
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = pol;
			continue;
		}
		if (delpol)
			break;
	}

	if (newpos && policy->xdo.type != XFRM_DEV_OFFLOAD_PACKET)
		hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
	else
		/* Packet offload policies enter to the head
		 * to speed-up lookups.
		 */
		hlist_add_head_rcu(&policy->bydst, chain);

	return delpol;
}
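/*
 * Illustrative note (comment only): both insert helpers above keep
 * chains sorted by ascending priority, with a new policy placed after
 * existing entries of the same priority; packet-offload policies are
 * the exception and always go to the head to speed up lookups.
 */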
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct net *net = xp_net(policy);
	struct xfrm_policy *delpol;
	struct hlist_head *chain;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
	if (chain)
		delpol = xfrm_policy_insert_list(chain, policy, excl);
	else
		delpol = xfrm_policy_inexact_insert(policy, dir, excl);

	if (IS_ERR(delpol)) {
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
		return PTR_ERR(delpol);
	}

	__xfrm_policy_link(policy, dir);

	/* After previous checking, family can either be AF_INET or AF_INET6 */
	if (policy->family == AF_INET)
		rt_genid_bump_ipv4(net);
	else
		rt_genid_bump_ipv6(net);

	if (delpol) {
		xfrm_policy_requeue(delpol, policy);
		__xfrm_policy_unlink(delpol, dir);
	}
	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index);
	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
	policy->curlft.add_time = ktime_get_real_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

static struct xfrm_policy *
__xfrm_policy_bysel_ctx(struct hlist_head *chain, const struct xfrm_mark *mark,
			u32 if_id, u8 type, int dir, struct xfrm_selector *sel,
			struct xfrm_sec_ctx *ctx)
{
	struct xfrm_policy *pol;

	if (!chain)
		return NULL;

	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == type &&
		    pol->if_id == if_id &&
		    xfrm_policy_mark_match(mark, pol) &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security))
			return pol;
	}

	return NULL;
}

struct xfrm_policy *
xfrm_policy_bysel_ctx(struct net *net, const struct xfrm_mark *mark, u32 if_id,
		      u8 type, int dir, struct xfrm_selector *sel,
		      struct xfrm_sec_ctx *ctx, int delete, int *err)
{
	struct xfrm_pol_inexact_bin *bin = NULL;
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;

	*err = 0;
	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, sel, sel->family, dir);
	if (!chain) {
		struct xfrm_pol_inexact_candidates cand;
		int i;

		bin = xfrm_policy_inexact_lookup(net, type,
						 sel->family, dir, if_id);
		if (!bin) {
			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
			return NULL;
		}

		if (!xfrm_policy_find_inexact_candidates(&cand, bin,
							 &sel->saddr,
							 &sel->daddr)) {
			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
			return NULL;
		}

		pol = NULL;
		for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
			struct xfrm_policy *tmp;

			tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
						      if_id, type, dir,
						      sel, ctx);
			if (!tmp)
				continue;

			if (!pol || tmp->pos < pol->pos)
				pol = tmp;
		}
	} else {
		pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
					      sel, ctx);
	}

	if (pol) {
		xfrm_pol_hold(pol);
		if (delete) {
			*err = security_xfrm_policy_delete(pol->security);
			if (*err) {
				spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
				return pol;
			}
			__xfrm_policy_unlink(pol, dir);
		}
		ret = pol;
	}
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	if (bin && delete)
		xfrm_policy_inexact_prune_bin(bin);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
struct xfrm_policy *
xfrm_policy_byid(struct net *net, const struct xfrm_mark *mark, u32 if_id,
		 u8 type, int dir, u32 id, int delete, int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;

	*err = -ENOENT;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = net->xfrm.policy_byidx + idx_hash(net, id);
	ret = NULL;
	hlist_for_each_entry(pol, chain, byidx) {
		if (pol->type == type && pol->index == id &&
		    pol->if_id == if_id && xfrm_policy_mark_match(mark, pol)) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
	struct xfrm_policy *pol;
	int err = 0;

	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
		if (pol->walk.dead ||
		    xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
		    pol->type != type)
			continue;

		err = security_xfrm_policy_delete(pol->security);
		if (err) {
			xfrm_audit_policy_delete(pol, 0, task_valid);
			return err;
		}
	}
	return err;
}

static inline int xfrm_dev_policy_flush_secctx_check(struct net *net,
						     struct net_device *dev,
						     bool task_valid)
{
	struct xfrm_policy *pol;
	int err = 0;

	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
		if (pol->walk.dead ||
		    xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
		    pol->xdo.dev != dev)
			continue;

		err = security_xfrm_policy_delete(pol->security);
		if (err) {
			xfrm_audit_policy_delete(pol, 0, task_valid);
			return err;
		}
	}
	return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
	return 0;
}

static inline int xfrm_dev_policy_flush_secctx_check(struct net *net,
						     struct net_device *dev,
						     bool task_valid)
{
	return 0;
}
#endif

int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
{
	int dir, err = 0, cnt = 0;
	struct xfrm_policy *pol;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
	if (err)
		goto out;

again:
	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
		if (pol->walk.dead)
			continue;

		dir = xfrm_policy_id2dir(pol->index);
		if (dir >= XFRM_POLICY_MAX ||
		    pol->type != type)
			continue;

		__xfrm_policy_unlink(pol, dir);
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
		cnt++;
		xfrm_audit_policy_delete(pol, 1, task_valid);
		xfrm_policy_kill(pol);
		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
		goto again;
	}
	if (cnt)
		__xfrm_policy_inexact_flush(net);
	else
		err = -ESRCH;
out:
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);
int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
			  bool task_valid)
{
	int dir, err = 0, cnt = 0;
	struct xfrm_policy *pol;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	err = xfrm_dev_policy_flush_secctx_check(net, dev, task_valid);
	if (err)
		goto out;

again:
	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
		if (pol->walk.dead)
			continue;

		dir = xfrm_policy_id2dir(pol->index);
		if (dir >= XFRM_POLICY_MAX ||
		    pol->xdo.dev != dev)
			continue;

		__xfrm_policy_unlink(pol, dir);
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
		cnt++;
		xfrm_audit_policy_delete(pol, 1, task_valid);
		xfrm_policy_kill(pol);
		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
		goto again;
	}
	if (cnt)
		__xfrm_policy_inexact_flush(net);
	else
		err = -ESRCH;
out:
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_dev_policy_flush);

int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
		     int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol;
	struct xfrm_policy_walk_entry *x;
	int error = 0;

	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
	    walk->type != XFRM_POLICY_TYPE_ANY)
		return -EINVAL;

	if (list_empty(&walk->walk.all) && walk->seq != 0)
		return 0;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	if (list_empty(&walk->walk.all))
		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
	else
		x = list_first_entry(&walk->walk.all,
				     struct xfrm_policy_walk_entry, all);

	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
		if (x->dead)
			continue;
		pol = container_of(x, struct xfrm_policy, walk);
		if (walk->type != XFRM_POLICY_TYPE_ANY &&
		    walk->type != pol->type)
			continue;
		error = func(pol, xfrm_policy_id2dir(pol->index),
			     walk->seq, data);
		if (error) {
			list_move_tail(&walk->walk.all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		error = -ENOENT;
		goto out;
	}
	list_del_init(&walk->walk.all);
out:
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
{
	INIT_LIST_HEAD(&walk->walk.all);
	walk->walk.dead = 1;
	walk->type = type;
	walk->seq = 0;
}
EXPORT_SYMBOL(xfrm_policy_walk_init);

void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
{
	if (list_empty(&walk->walk.all))
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
	list_del(&walk->walk.all);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);
/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
static int xfrm_policy_match(const struct xfrm_policy *pol,
			     const struct flowi *fl,
			     u8 type, u16 family, u32 if_id)
{
	const struct xfrm_selector *sel = &pol->selector;
	int ret = -ESRCH;
	bool match;

	if (pol->family != family ||
	    pol->if_id != if_id ||
	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid);
	return ret;
}

static struct xfrm_pol_inexact_node *
xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
				seqcount_spinlock_t *count,
				const xfrm_address_t *addr, u16 family)
{
	const struct rb_node *parent;
	int seq;

again:
	seq = read_seqcount_begin(count);

	parent = rcu_dereference_raw(r->rb_node);
	while (parent) {
		struct xfrm_pol_inexact_node *node;
		int delta;

		node = rb_entry(parent, struct xfrm_pol_inexact_node, node);

		delta = xfrm_policy_addr_delta(addr, &node->addr,
					       node->prefixlen, family);
		if (delta < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			continue;
		} else if (delta > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
			continue;
		}

		return node;
	}

	if (read_seqcount_retry(count, seq))
		goto again;

	return NULL;
}

static bool
xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
				    struct xfrm_pol_inexact_bin *b,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr)
{
	struct xfrm_pol_inexact_node *n;
	u16 family;

	if (!b)
		return false;

	family = b->k.family;
	memset(cand, 0, sizeof(*cand));
	cand->res[XFRM_POL_CAND_ANY] = &b->hhead;

	n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
					    family);
	if (n) {
		cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
		n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
						    family);
		if (n)
			cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
	}

	n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
					    family);
	if (n)
		cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;

	return true;
}

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
			       u8 dir, u32 if_id)
{
	struct xfrm_pol_inexact_key k = {
		.family = family,
		.type = type,
		.dir = dir,
		.if_id = if_id,
	};

	write_pnet(&k.net, net);

	return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
				 xfrm_pol_inexact_params);
}

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
			   u8 dir, u32 if_id)
{
	struct xfrm_pol_inexact_bin *bin;

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	rcu_read_lock();
	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
	rcu_read_unlock();

	return bin;
}
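/*
 * Illustrative note (comment only): the candidate evaluation below
 * scans each list in priority order and stops early once pol->priority
 * exceeds the best match so far; on equal priority, the policy with the
 * lower pos (the one positioned earlier at insert time) is kept.
 */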
prefer->priority : ~0u; 2115 struct xfrm_policy *pol; 2116 2117 if (!chain) 2118 return NULL; 2119 2120 hlist_for_each_entry_rcu(pol, chain, bydst) { 2121 int err; 2122 2123 if (pol->priority > priority) 2124 break; 2125 2126 err = xfrm_policy_match(pol, fl, type, family, if_id); 2127 if (err) { 2128 if (err != -ESRCH) 2129 return ERR_PTR(err); 2130 2131 continue; 2132 } 2133 2134 if (prefer) { 2135 /* matches. Is it older than *prefer? */ 2136 if (pol->priority == priority && 2137 prefer->pos < pol->pos) 2138 return prefer; 2139 } 2140 2141 return pol; 2142 } 2143 2144 return NULL; 2145 } 2146 2147 static struct xfrm_policy * 2148 xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand, 2149 struct xfrm_policy *prefer, 2150 const struct flowi *fl, 2151 u8 type, u16 family, u32 if_id) 2152 { 2153 struct xfrm_policy *tmp; 2154 int i; 2155 2156 for (i = 0; i < ARRAY_SIZE(cand->res); i++) { 2157 tmp = __xfrm_policy_eval_candidates(cand->res[i], 2158 prefer, 2159 fl, type, family, if_id); 2160 if (!tmp) 2161 continue; 2162 2163 if (IS_ERR(tmp)) 2164 return tmp; 2165 prefer = tmp; 2166 } 2167 2168 return prefer; 2169 } 2170 2171 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type, 2172 const struct flowi *fl, 2173 u16 family, u8 dir, 2174 u32 if_id) 2175 { 2176 struct xfrm_pol_inexact_candidates cand; 2177 const xfrm_address_t *daddr, *saddr; 2178 struct xfrm_pol_inexact_bin *bin; 2179 struct xfrm_policy *pol, *ret; 2180 struct hlist_head *chain; 2181 unsigned int sequence; 2182 int err; 2183 2184 daddr = xfrm_flowi_daddr(fl, family); 2185 saddr = xfrm_flowi_saddr(fl, family); 2186 if (unlikely(!daddr || !saddr)) 2187 return NULL; 2188 2189 rcu_read_lock(); 2190 retry: 2191 do { 2192 sequence = read_seqcount_begin(&net->xfrm.xfrm_policy_hash_generation); 2193 chain = policy_hash_direct(net, daddr, saddr, family, dir); 2194 } while (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence)); 2195 2196 ret = NULL; 2197 hlist_for_each_entry_rcu(pol, chain, bydst) { 2198 err = xfrm_policy_match(pol, fl, type, family, if_id); 2199 if (err) { 2200 if (err == -ESRCH) 2201 continue; 2202 else { 2203 ret = ERR_PTR(err); 2204 goto fail; 2205 } 2206 } else { 2207 ret = pol; 2208 break; 2209 } 2210 } 2211 if (ret && ret->xdo.type == XFRM_DEV_OFFLOAD_PACKET) 2212 goto skip_inexact; 2213 2214 bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id); 2215 if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr, 2216 daddr)) 2217 goto skip_inexact; 2218 2219 pol = xfrm_policy_eval_candidates(&cand, ret, fl, type, 2220 family, if_id); 2221 if (pol) { 2222 ret = pol; 2223 if (IS_ERR(pol)) 2224 goto fail; 2225 } 2226 2227 skip_inexact: 2228 if (read_seqcount_retry(&net->xfrm.xfrm_policy_hash_generation, sequence)) 2229 goto retry; 2230 2231 if (ret && !xfrm_pol_hold_rcu(ret)) 2232 goto retry; 2233 fail: 2234 rcu_read_unlock(); 2235 2236 return ret; 2237 } 2238 2239 static struct xfrm_policy *xfrm_policy_lookup(struct net *net, 2240 const struct flowi *fl, 2241 u16 family, u8 dir, u32 if_id) 2242 { 2243 #ifdef CONFIG_XFRM_SUB_POLICY 2244 struct xfrm_policy *pol; 2245 2246 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, 2247 dir, if_id); 2248 if (pol != NULL) 2249 return pol; 2250 #endif 2251 return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, 2252 dir, if_id); 2253 } 2254 2255 static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir, 2256 const struct flowi *fl, 2257 u16 
family, u32 if_id) 2258 { 2259 struct xfrm_policy *pol; 2260 2261 rcu_read_lock(); 2262 again: 2263 pol = rcu_dereference(sk->sk_policy[dir]); 2264 if (pol != NULL) { 2265 bool match; 2266 int err = 0; 2267 2268 if (pol->family != family) { 2269 pol = NULL; 2270 goto out; 2271 } 2272 2273 match = xfrm_selector_match(&pol->selector, fl, family); 2274 if (match) { 2275 if ((READ_ONCE(sk->sk_mark) & pol->mark.m) != pol->mark.v || 2276 pol->if_id != if_id) { 2277 pol = NULL; 2278 goto out; 2279 } 2280 err = security_xfrm_policy_lookup(pol->security, 2281 fl->flowi_secid); 2282 if (!err) { 2283 if (!xfrm_pol_hold_rcu(pol)) 2284 goto again; 2285 } else if (err == -ESRCH) { 2286 pol = NULL; 2287 } else { 2288 pol = ERR_PTR(err); 2289 } 2290 } else 2291 pol = NULL; 2292 } 2293 out: 2294 rcu_read_unlock(); 2295 return pol; 2296 } 2297 2298 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir) 2299 { 2300 struct net *net = xp_net(pol); 2301 2302 list_add(&pol->walk.all, &net->xfrm.policy_all); 2303 net->xfrm.policy_count[dir]++; 2304 xfrm_pol_hold(pol); 2305 } 2306 2307 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, 2308 int dir) 2309 { 2310 struct net *net = xp_net(pol); 2311 2312 if (list_empty(&pol->walk.all)) 2313 return NULL; 2314 2315 /* Socket policies are not hashed. */ 2316 if (!hlist_unhashed(&pol->bydst)) { 2317 hlist_del_rcu(&pol->bydst); 2318 hlist_del_init(&pol->bydst_inexact_list); 2319 hlist_del(&pol->byidx); 2320 } 2321 2322 list_del_init(&pol->walk.all); 2323 net->xfrm.policy_count[dir]--; 2324 2325 return pol; 2326 } 2327 2328 static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir) 2329 { 2330 __xfrm_policy_link(pol, XFRM_POLICY_MAX + dir); 2331 } 2332 2333 static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir) 2334 { 2335 __xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir); 2336 } 2337 2338 int xfrm_policy_delete(struct xfrm_policy *pol, int dir) 2339 { 2340 struct net *net = xp_net(pol); 2341 2342 spin_lock_bh(&net->xfrm.xfrm_policy_lock); 2343 pol = __xfrm_policy_unlink(pol, dir); 2344 spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 2345 if (pol) { 2346 xfrm_policy_kill(pol); 2347 return 0; 2348 } 2349 return -ENOENT; 2350 } 2351 EXPORT_SYMBOL(xfrm_policy_delete); 2352 2353 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol) 2354 { 2355 struct net *net = sock_net(sk); 2356 struct xfrm_policy *old_pol; 2357 2358 #ifdef CONFIG_XFRM_SUB_POLICY 2359 if (pol && pol->type != XFRM_POLICY_TYPE_MAIN) 2360 return -EINVAL; 2361 #endif 2362 2363 spin_lock_bh(&net->xfrm.xfrm_policy_lock); 2364 old_pol = rcu_dereference_protected(sk->sk_policy[dir], 2365 lockdep_is_held(&net->xfrm.xfrm_policy_lock)); 2366 if (pol) { 2367 pol->curlft.add_time = ktime_get_real_seconds(); 2368 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0); 2369 xfrm_sk_policy_link(pol, dir); 2370 } 2371 rcu_assign_pointer(sk->sk_policy[dir], pol); 2372 if (old_pol) { 2373 if (pol) 2374 xfrm_policy_requeue(old_pol, pol); 2375 2376 /* Unlinking succeeds always. This is the only function 2377 * allowed to delete or replace socket policy. 
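 * The old policy is killed only after the policy lock has been
 * dropped, once it is fully unlinked here.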
2378 */ 2379 xfrm_sk_policy_unlink(old_pol, dir); 2380 } 2381 spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 2382 2383 if (old_pol) { 2384 xfrm_policy_kill(old_pol); 2385 } 2386 return 0; 2387 } 2388 2389 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir) 2390 { 2391 struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC); 2392 struct net *net = xp_net(old); 2393 2394 if (newp) { 2395 newp->selector = old->selector; 2396 if (security_xfrm_policy_clone(old->security, 2397 &newp->security)) { 2398 kfree(newp); 2399 return NULL; /* ENOMEM */ 2400 } 2401 newp->lft = old->lft; 2402 newp->curlft = old->curlft; 2403 newp->mark = old->mark; 2404 newp->if_id = old->if_id; 2405 newp->action = old->action; 2406 newp->flags = old->flags; 2407 newp->xfrm_nr = old->xfrm_nr; 2408 newp->index = old->index; 2409 newp->type = old->type; 2410 newp->family = old->family; 2411 memcpy(newp->xfrm_vec, old->xfrm_vec, 2412 newp->xfrm_nr*sizeof(struct xfrm_tmpl)); 2413 spin_lock_bh(&net->xfrm.xfrm_policy_lock); 2414 xfrm_sk_policy_link(newp, dir); 2415 spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 2416 xfrm_pol_put(newp); 2417 } 2418 return newp; 2419 } 2420 2421 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) 2422 { 2423 const struct xfrm_policy *p; 2424 struct xfrm_policy *np; 2425 int i, ret = 0; 2426 2427 rcu_read_lock(); 2428 for (i = 0; i < 2; i++) { 2429 p = rcu_dereference(osk->sk_policy[i]); 2430 if (p) { 2431 np = clone_policy(p, i); 2432 if (unlikely(!np)) { 2433 ret = -ENOMEM; 2434 break; 2435 } 2436 rcu_assign_pointer(sk->sk_policy[i], np); 2437 } 2438 } 2439 rcu_read_unlock(); 2440 return ret; 2441 } 2442 2443 static int 2444 xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local, 2445 xfrm_address_t *remote, unsigned short family, u32 mark) 2446 { 2447 int err; 2448 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 2449 2450 if (unlikely(afinfo == NULL)) 2451 return -EINVAL; 2452 err = afinfo->get_saddr(net, oif, local, remote, mark); 2453 rcu_read_unlock(); 2454 return err; 2455 } 2456 2457 /* Resolve list of templates for the flow, given policy. 
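 *
 * For each template a matching xfrm_state is looked up (possibly
 * triggering an acquire); tunnel and BEET templates supply their own
 * endpoint addresses, and optional templates may be skipped on failure.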
*/ 2458 2459 static int 2460 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl, 2461 struct xfrm_state **xfrm, unsigned short family) 2462 { 2463 struct net *net = xp_net(policy); 2464 int nx; 2465 int i, error; 2466 xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family); 2467 xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family); 2468 xfrm_address_t tmp; 2469 2470 for (nx = 0, i = 0; i < policy->xfrm_nr; i++) { 2471 struct xfrm_state *x; 2472 xfrm_address_t *remote = daddr; 2473 xfrm_address_t *local = saddr; 2474 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i]; 2475 2476 if (tmpl->mode == XFRM_MODE_TUNNEL || 2477 tmpl->mode == XFRM_MODE_BEET) { 2478 remote = &tmpl->id.daddr; 2479 local = &tmpl->saddr; 2480 if (xfrm_addr_any(local, tmpl->encap_family)) { 2481 error = xfrm_get_saddr(net, fl->flowi_oif, 2482 &tmp, remote, 2483 tmpl->encap_family, 0); 2484 if (error) 2485 goto fail; 2486 local = &tmp; 2487 } 2488 } 2489 2490 x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, 2491 family, policy->if_id); 2492 if (x && x->dir && x->dir != XFRM_SA_DIR_OUT) { 2493 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEDIRERROR); 2494 xfrm_state_put(x); 2495 error = -EINVAL; 2496 goto fail; 2497 } 2498 2499 if (x && x->km.state == XFRM_STATE_VALID) { 2500 xfrm[nx++] = x; 2501 daddr = remote; 2502 saddr = local; 2503 continue; 2504 } 2505 if (x) { 2506 error = (x->km.state == XFRM_STATE_ERROR ? 2507 -EINVAL : -EAGAIN); 2508 xfrm_state_put(x); 2509 } else if (error == -ESRCH) { 2510 error = -EAGAIN; 2511 } 2512 2513 if (!tmpl->optional) 2514 goto fail; 2515 } 2516 return nx; 2517 2518 fail: 2519 for (nx--; nx >= 0; nx--) 2520 xfrm_state_put(xfrm[nx]); 2521 return error; 2522 } 2523 2524 static int 2525 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl, 2526 struct xfrm_state **xfrm, unsigned short family) 2527 { 2528 struct xfrm_state *tp[XFRM_MAX_DEPTH]; 2529 struct xfrm_state **tpp = (npols > 1) ? 
tp : xfrm; 2530 int cnx = 0;
2531 int error;
2532 int ret;
2533 int i;
2534
2535 for (i = 0; i < npols; i++) {
2536 if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
2537 error = -ENOBUFS;
2538 goto fail;
2539 }
2540
2541 ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
2542 if (ret < 0) {
2543 error = ret;
2544 goto fail;
2545 } else
2546 cnx += ret;
2547 }
2548
2549 /* found states are sorted for outbound processing */
2550 if (npols > 1)
2551 xfrm_state_sort(xfrm, tpp, cnx, family);
2552
2553 return cnx;
2554
2555 fail:
2556 for (cnx--; cnx >= 0; cnx--)
2557 xfrm_state_put(tpp[cnx]);
2558 return error;
2559
2560 }
2561
2562 static int xfrm_get_tos(const struct flowi *fl, int family)
2563 {
2564 if (family == AF_INET)
2565 return fl->u.ip4.flowi4_tos & INET_DSCP_MASK;
2566
2567 return 0;
2568 }
2569
2570 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
2571 {
2572 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
2573 struct dst_ops *dst_ops;
2574 struct xfrm_dst *xdst;
2575
2576 if (!afinfo)
2577 return ERR_PTR(-EINVAL);
2578
2579 switch (family) {
2580 case AF_INET:
2581 dst_ops = &net->xfrm.xfrm4_dst_ops;
2582 break;
2583 #if IS_ENABLED(CONFIG_IPV6)
2584 case AF_INET6:
2585 dst_ops = &net->xfrm.xfrm6_dst_ops;
2586 break;
2587 #endif
2588 default:
2589 BUG();
2590 }
2591 xdst = dst_alloc(dst_ops, NULL, DST_OBSOLETE_NONE, 0);
2592
2593 if (likely(xdst)) {
2594 memset_after(xdst, 0, u.dst);
2595 } else
2596 xdst = ERR_PTR(-ENOBUFS);
2597
2598 rcu_read_unlock();
2599
2600 return xdst;
2601 }
2602
2603 static void xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
2604 int nfheader_len)
2605 {
2606 if (dst->ops->family == AF_INET6) {
2607 path->path_cookie = rt6_get_cookie(dst_rt6_info(dst));
2608 path->u.rt6.rt6i_nfheader_len = nfheader_len;
2609 }
2610 }
2611
2612 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
2613 const struct flowi *fl)
2614 {
2615 const struct xfrm_policy_afinfo *afinfo =
2616 xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
2617 int err;
2618
2619 if (!afinfo)
2620 return -EINVAL;
2621
2622 err = afinfo->fill_dst(xdst, dev, fl);
2623
2624 rcu_read_unlock();
2625
2626 return err;
2627 }
2628
2629
2630 /* Allocate chain of dst_entry's, attach known xfrm's, calculate
2631 * all the metrics... In short, bundle a bundle.
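 *
 * The result is a chain of xfrm_dst entries, one per state, whose last
 * child is the original route; header and trailer lengths are accounted
 * per hop along the way.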
2632 */ 2633 2634 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, 2635 struct xfrm_state **xfrm, 2636 struct xfrm_dst **bundle, 2637 int nx, 2638 const struct flowi *fl, 2639 struct dst_entry *dst) 2640 { 2641 const struct xfrm_state_afinfo *afinfo; 2642 const struct xfrm_mode *inner_mode; 2643 struct net *net = xp_net(policy); 2644 unsigned long now = jiffies; 2645 struct net_device *dev; 2646 struct xfrm_dst *xdst_prev = NULL; 2647 struct xfrm_dst *xdst0 = NULL; 2648 int i = 0; 2649 int err; 2650 int header_len = 0; 2651 int nfheader_len = 0; 2652 int trailer_len = 0; 2653 int tos; 2654 int family = policy->selector.family; 2655 xfrm_address_t saddr, daddr; 2656 2657 xfrm_flowi_addr_get(fl, &saddr, &daddr, family); 2658 2659 tos = xfrm_get_tos(fl, family); 2660 2661 dst_hold(dst); 2662 2663 for (; i < nx; i++) { 2664 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family); 2665 struct dst_entry *dst1 = &xdst->u.dst; 2666 2667 err = PTR_ERR(xdst); 2668 if (IS_ERR(xdst)) { 2669 dst_release(dst); 2670 goto put_states; 2671 } 2672 2673 bundle[i] = xdst; 2674 if (!xdst_prev) 2675 xdst0 = xdst; 2676 else 2677 /* Ref count is taken during xfrm_alloc_dst() 2678 * No need to do dst_clone() on dst1 2679 */ 2680 xfrm_dst_set_child(xdst_prev, &xdst->u.dst); 2681 2682 if (xfrm[i]->sel.family == AF_UNSPEC) { 2683 inner_mode = xfrm_ip2inner_mode(xfrm[i], 2684 xfrm_af2proto(family)); 2685 if (!inner_mode) { 2686 err = -EAFNOSUPPORT; 2687 dst_release(dst); 2688 goto put_states; 2689 } 2690 } else 2691 inner_mode = &xfrm[i]->inner_mode; 2692 2693 xdst->route = dst; 2694 dst_copy_metrics(dst1, dst); 2695 2696 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) { 2697 __u32 mark = 0; 2698 int oif; 2699 2700 if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m) 2701 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]); 2702 2703 if (xfrm[i]->xso.type != XFRM_DEV_OFFLOAD_PACKET) 2704 family = xfrm[i]->props.family; 2705 2706 oif = fl->flowi_oif ? 
: fl->flowi_l3mdev; 2707 dst = xfrm_dst_lookup(xfrm[i], tos, oif, 2708 &saddr, &daddr, family, mark); 2709 err = PTR_ERR(dst); 2710 if (IS_ERR(dst)) 2711 goto put_states; 2712 } else 2713 dst_hold(dst); 2714 2715 dst1->xfrm = xfrm[i]; 2716 xdst->xfrm_genid = xfrm[i]->genid; 2717 2718 dst1->obsolete = DST_OBSOLETE_FORCE_CHK; 2719 dst1->lastuse = now; 2720 2721 dst1->input = dst_discard; 2722 2723 rcu_read_lock(); 2724 afinfo = xfrm_state_afinfo_get_rcu(inner_mode->family); 2725 if (likely(afinfo)) 2726 dst1->output = afinfo->output; 2727 else 2728 dst1->output = dst_discard_out; 2729 rcu_read_unlock(); 2730 2731 xdst_prev = xdst; 2732 2733 header_len += xfrm[i]->props.header_len; 2734 if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT) 2735 nfheader_len += xfrm[i]->props.header_len; 2736 trailer_len += xfrm[i]->props.trailer_len; 2737 } 2738 2739 xfrm_dst_set_child(xdst_prev, dst); 2740 xdst0->path = dst; 2741 2742 err = -ENODEV; 2743 dev = dst->dev; 2744 if (!dev) 2745 goto free_dst; 2746 2747 xfrm_init_path(xdst0, dst, nfheader_len); 2748 xfrm_init_pmtu(bundle, nx); 2749 2750 for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst; 2751 xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) { 2752 err = xfrm_fill_dst(xdst_prev, dev, fl); 2753 if (err) 2754 goto free_dst; 2755 2756 xdst_prev->u.dst.header_len = header_len; 2757 xdst_prev->u.dst.trailer_len = trailer_len; 2758 header_len -= xdst_prev->u.dst.xfrm->props.header_len; 2759 trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len; 2760 } 2761 2762 return &xdst0->u.dst; 2763 2764 put_states: 2765 for (; i < nx; i++) 2766 xfrm_state_put(xfrm[i]); 2767 free_dst: 2768 if (xdst0) 2769 dst_release_immediate(&xdst0->u.dst); 2770 2771 return ERR_PTR(err); 2772 } 2773 2774 static int xfrm_expand_policies(const struct flowi *fl, u16 family, 2775 struct xfrm_policy **pols, 2776 int *num_pols, int *num_xfrms) 2777 { 2778 int i; 2779 2780 if (*num_pols == 0 || !pols[0]) { 2781 *num_pols = 0; 2782 *num_xfrms = 0; 2783 return 0; 2784 } 2785 if (IS_ERR(pols[0])) { 2786 *num_pols = 0; 2787 return PTR_ERR(pols[0]); 2788 } 2789 2790 *num_xfrms = pols[0]->xfrm_nr; 2791 2792 #ifdef CONFIG_XFRM_SUB_POLICY 2793 if (pols[0]->action == XFRM_POLICY_ALLOW && 2794 pols[0]->type != XFRM_POLICY_TYPE_MAIN) { 2795 pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]), 2796 XFRM_POLICY_TYPE_MAIN, 2797 fl, family, 2798 XFRM_POLICY_OUT, 2799 pols[0]->if_id); 2800 if (pols[1]) { 2801 if (IS_ERR(pols[1])) { 2802 xfrm_pols_put(pols, *num_pols); 2803 *num_pols = 0; 2804 return PTR_ERR(pols[1]); 2805 } 2806 (*num_pols)++; 2807 (*num_xfrms) += pols[1]->xfrm_nr; 2808 } 2809 } 2810 #endif 2811 for (i = 0; i < *num_pols; i++) { 2812 if (pols[i]->action != XFRM_POLICY_ALLOW) { 2813 *num_xfrms = -1; 2814 break; 2815 } 2816 } 2817 2818 return 0; 2819 2820 } 2821 2822 static struct xfrm_dst * 2823 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, 2824 const struct flowi *fl, u16 family, 2825 struct dst_entry *dst_orig) 2826 { 2827 struct net *net = xp_net(pols[0]); 2828 struct xfrm_state *xfrm[XFRM_MAX_DEPTH]; 2829 struct xfrm_dst *bundle[XFRM_MAX_DEPTH]; 2830 struct xfrm_dst *xdst; 2831 struct dst_entry *dst; 2832 int err; 2833 2834 /* Try to instantiate a bundle */ 2835 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family); 2836 if (err <= 0) { 2837 if (err == 0) 2838 return NULL; 2839 2840 if (err != -EAGAIN) 2841 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); 2842 return ERR_PTR(err); 2843 } 2844 2845 dst = 
xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig); 2846 if (IS_ERR(dst)) { 2847 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR); 2848 return ERR_CAST(dst); 2849 } 2850 2851 xdst = (struct xfrm_dst *)dst; 2852 xdst->num_xfrms = err; 2853 xdst->num_pols = num_pols; 2854 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols); 2855 xdst->policy_genid = atomic_read(&pols[0]->genid); 2856 2857 return xdst; 2858 } 2859 2860 static void xfrm_policy_queue_process(struct timer_list *t) 2861 { 2862 struct sk_buff *skb; 2863 struct sock *sk; 2864 struct dst_entry *dst; 2865 struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer); 2866 struct net *net = xp_net(pol); 2867 struct xfrm_policy_queue *pq = &pol->polq; 2868 struct flowi fl; 2869 struct sk_buff_head list; 2870 __u32 skb_mark; 2871 2872 spin_lock(&pq->hold_queue.lock); 2873 skb = skb_peek(&pq->hold_queue); 2874 if (!skb) { 2875 spin_unlock(&pq->hold_queue.lock); 2876 goto out; 2877 } 2878 dst = skb_dst(skb); 2879 sk = skb->sk; 2880 2881 /* Fixup the mark to support VTI. */ 2882 skb_mark = skb->mark; 2883 skb->mark = pol->mark.v; 2884 xfrm_decode_session(net, skb, &fl, dst->ops->family); 2885 skb->mark = skb_mark; 2886 spin_unlock(&pq->hold_queue.lock); 2887 2888 dst_hold(xfrm_dst_path(dst)); 2889 dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE); 2890 if (IS_ERR(dst)) 2891 goto purge_queue; 2892 2893 if (dst->flags & DST_XFRM_QUEUE) { 2894 dst_release(dst); 2895 2896 if (pq->timeout >= XFRM_QUEUE_TMO_MAX) 2897 goto purge_queue; 2898 2899 pq->timeout = pq->timeout << 1; 2900 if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout)) 2901 xfrm_pol_hold(pol); 2902 goto out; 2903 } 2904 2905 dst_release(dst); 2906 2907 __skb_queue_head_init(&list); 2908 2909 spin_lock(&pq->hold_queue.lock); 2910 pq->timeout = 0; 2911 skb_queue_splice_init(&pq->hold_queue, &list); 2912 spin_unlock(&pq->hold_queue.lock); 2913 2914 while (!skb_queue_empty(&list)) { 2915 skb = __skb_dequeue(&list); 2916 2917 /* Fixup the mark to support VTI. 
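 * The policy mark is what VTI policies are keyed on; the original
 * skb->mark is restored as soon as the session has been decoded.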
*/ 2918 skb_mark = skb->mark; 2919 skb->mark = pol->mark.v; 2920 xfrm_decode_session(net, skb, &fl, skb_dst(skb)->ops->family); 2921 skb->mark = skb_mark; 2922 2923 dst_hold(xfrm_dst_path(skb_dst(skb))); 2924 dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 0); 2925 if (IS_ERR(dst)) { 2926 kfree_skb(skb); 2927 continue; 2928 } 2929 2930 nf_reset_ct(skb); 2931 skb_dst_drop(skb); 2932 skb_dst_set(skb, dst); 2933 2934 dst_output(net, skb->sk, skb); 2935 } 2936 2937 out: 2938 xfrm_pol_put(pol); 2939 return; 2940 2941 purge_queue: 2942 pq->timeout = 0; 2943 skb_queue_purge(&pq->hold_queue); 2944 xfrm_pol_put(pol); 2945 } 2946 2947 static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb) 2948 { 2949 unsigned long sched_next; 2950 struct dst_entry *dst = skb_dst(skb); 2951 struct xfrm_dst *xdst = (struct xfrm_dst *) dst; 2952 struct xfrm_policy *pol = xdst->pols[0]; 2953 struct xfrm_policy_queue *pq = &pol->polq; 2954 2955 if (unlikely(skb_fclone_busy(sk, skb))) { 2956 kfree_skb(skb); 2957 return 0; 2958 } 2959 2960 if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) { 2961 kfree_skb(skb); 2962 return -EAGAIN; 2963 } 2964 2965 skb_dst_force(skb); 2966 2967 spin_lock_bh(&pq->hold_queue.lock); 2968 2969 if (!pq->timeout) 2970 pq->timeout = XFRM_QUEUE_TMO_MIN; 2971 2972 sched_next = jiffies + pq->timeout; 2973 2974 if (del_timer(&pq->hold_timer)) { 2975 if (time_before(pq->hold_timer.expires, sched_next)) 2976 sched_next = pq->hold_timer.expires; 2977 xfrm_pol_put(pol); 2978 } 2979 2980 __skb_queue_tail(&pq->hold_queue, skb); 2981 if (!mod_timer(&pq->hold_timer, sched_next)) 2982 xfrm_pol_hold(pol); 2983 2984 spin_unlock_bh(&pq->hold_queue.lock); 2985 2986 return 0; 2987 } 2988 2989 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net, 2990 struct xfrm_flo *xflo, 2991 const struct flowi *fl, 2992 int num_xfrms, 2993 u16 family) 2994 { 2995 int err; 2996 struct net_device *dev; 2997 struct dst_entry *dst; 2998 struct dst_entry *dst1; 2999 struct xfrm_dst *xdst; 3000 3001 xdst = xfrm_alloc_dst(net, family); 3002 if (IS_ERR(xdst)) 3003 return xdst; 3004 3005 if (!(xflo->flags & XFRM_LOOKUP_QUEUE) || 3006 net->xfrm.sysctl_larval_drop || 3007 num_xfrms <= 0) 3008 return xdst; 3009 3010 dst = xflo->dst_orig; 3011 dst1 = &xdst->u.dst; 3012 dst_hold(dst); 3013 xdst->route = dst; 3014 3015 dst_copy_metrics(dst1, dst); 3016 3017 dst1->obsolete = DST_OBSOLETE_FORCE_CHK; 3018 dst1->flags |= DST_XFRM_QUEUE; 3019 dst1->lastuse = jiffies; 3020 3021 dst1->input = dst_discard; 3022 dst1->output = xdst_queue_output; 3023 3024 dst_hold(dst); 3025 xfrm_dst_set_child(xdst, dst); 3026 xdst->path = dst; 3027 3028 xfrm_init_path((struct xfrm_dst *)dst1, dst, 0); 3029 3030 err = -ENODEV; 3031 dev = dst->dev; 3032 if (!dev) 3033 goto free_dst; 3034 3035 err = xfrm_fill_dst(xdst, dev, fl); 3036 if (err) 3037 goto free_dst; 3038 3039 out: 3040 return xdst; 3041 3042 free_dst: 3043 dst_release(dst1); 3044 xdst = ERR_PTR(err); 3045 goto out; 3046 } 3047 3048 static struct xfrm_dst *xfrm_bundle_lookup(struct net *net, 3049 const struct flowi *fl, 3050 u16 family, u8 dir, 3051 struct xfrm_flo *xflo, u32 if_id) 3052 { 3053 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 3054 int num_pols = 0, num_xfrms = 0, err; 3055 struct xfrm_dst *xdst; 3056 3057 /* Resolve policies to use if we couldn't get them from 3058 * previous cache entry */ 3059 num_pols = 1; 3060 pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id); 3061 err = xfrm_expand_policies(fl, family, pols, 3062 &num_pols, 
&num_xfrms); 3063 if (err < 0)
3064 goto inc_error;
3065 if (num_pols == 0)
3066 return NULL;
3067 if (num_xfrms <= 0)
3068 goto make_dummy_bundle;
3069
3070 xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
3071 xflo->dst_orig);
3072 if (IS_ERR(xdst)) {
3073 err = PTR_ERR(xdst);
3074 if (err == -EREMOTE) {
3075 xfrm_pols_put(pols, num_pols);
3076 return NULL;
3077 }
3078
3079 if (err != -EAGAIN)
3080 goto error;
3081 goto make_dummy_bundle;
3082 } else if (xdst == NULL) {
3083 num_xfrms = 0;
3084 goto make_dummy_bundle;
3085 }
3086
3087 return xdst;
3088
3089 make_dummy_bundle:
3090 /* We found policies, but there are no bundles to instantiate:
3091 * either the policy blocks, it has no transformations, or
3092 * we could not build a template (no xfrm_states). */
3093 xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
3094 if (IS_ERR(xdst)) {
3095 xfrm_pols_put(pols, num_pols);
3096 return ERR_CAST(xdst);
3097 }
3098 xdst->num_pols = num_pols;
3099 xdst->num_xfrms = num_xfrms;
3100 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
3101
3102 return xdst;
3103
3104 inc_error:
3105 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
3106 error:
3107 xfrm_pols_put(pols, num_pols);
3108 return ERR_PTR(err);
3109 }
3110
3111 static struct dst_entry *make_blackhole(struct net *net, u16 family,
3112 struct dst_entry *dst_orig)
3113 {
3114 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
3115 struct dst_entry *ret;
3116
3117 if (!afinfo) {
3118 dst_release(dst_orig);
3119 return ERR_PTR(-EINVAL);
3120 } else {
3121 ret = afinfo->blackhole_route(net, dst_orig);
3122 }
3123 rcu_read_unlock();
3124
3125 return ret;
3126 }
3127
3128 /* Finds/creates a bundle for a given flow and if_id
3129 *
3130 * At the moment we eat a raw IP route. Mostly to speed up lookups
3131 * on interfaces with disabled IPsec.
3132 *
3133 * xfrm_lookup uses an if_id of 0 by default, and is provided for
3134 * compatibility
3135 */
3136 struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
3137 struct dst_entry *dst_orig,
3138 const struct flowi *fl,
3139 const struct sock *sk,
3140 int flags, u32 if_id)
3141 {
3142 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3143 struct xfrm_dst *xdst;
3144 struct dst_entry *dst, *route;
3145 u16 family = dst_orig->ops->family;
3146 u8 dir = XFRM_POLICY_OUT;
3147 int i, err, num_pols, num_xfrms = 0, drop_pols = 0;
3148
3149 dst = NULL;
3150 xdst = NULL;
3151 route = NULL;
3152
3153 sk = sk_const_to_full_sk(sk);
3154 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
3155 num_pols = 1;
3156 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
3157 if_id);
3158 err = xfrm_expand_policies(fl, family, pols,
3159 &num_pols, &num_xfrms);
3160 if (err < 0)
3161 goto dropdst;
3162
3163 if (num_pols) {
3164 if (num_xfrms <= 0) {
3165 drop_pols = num_pols;
3166 goto no_transform;
3167 }
3168
3169 xdst = xfrm_resolve_and_create_bundle(
3170 pols, num_pols, fl,
3171 family, dst_orig);
3172
3173 if (IS_ERR(xdst)) {
3174 xfrm_pols_put(pols, num_pols);
3175 err = PTR_ERR(xdst);
3176 if (err == -EREMOTE)
3177 goto nopol;
3178
3179 goto dropdst;
3180 } else if (xdst == NULL) {
3181 num_xfrms = 0;
3182 drop_pols = num_pols;
3183 goto no_transform;
3184 }
3185
3186 route = xdst->route;
3187 }
3188 }
3189
3190 if (xdst == NULL) {
3191 struct xfrm_flo xflo;
3192
3193 xflo.dst_orig = dst_orig;
3194 xflo.flags = flags;
3195
3196 /* To accelerate a bit...
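 * Skip the policy lookup entirely when the route opts out of IPsec
 * (DST_NOXFRM) or this namespace has no output policies installed.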
*/ 3197 if (!if_id && ((dst_orig->flags & DST_NOXFRM) ||
3198 !net->xfrm.policy_count[XFRM_POLICY_OUT]))
3199 goto nopol;
3200
3201 xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id);
3202 if (xdst == NULL)
3203 goto nopol;
3204 if (IS_ERR(xdst)) {
3205 err = PTR_ERR(xdst);
3206 goto dropdst;
3207 }
3208
3209 num_pols = xdst->num_pols;
3210 num_xfrms = xdst->num_xfrms;
3211 memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
3212 route = xdst->route;
3213 }
3214
3215 dst = &xdst->u.dst;
3216 if (route == NULL && num_xfrms > 0) {
3217 /* The only case in which xfrm_bundle_lookup() returns a
3218 * bundle with a null route is when the template could
3219 * not be resolved. It means policies are there, but the
3220 * bundle could not be created, since we don't yet
3221 * have the xfrm_states. We need to wait for the KM to
3222 * negotiate new SAs or bail out with an error. */
3223 if (net->xfrm.sysctl_larval_drop) {
3224 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3225 err = -EREMOTE;
3226 goto error;
3227 }
3228
3229 err = -EAGAIN;
3230
3231 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
3232 goto error;
3233 }
3234
3235 no_transform:
3236 if (num_pols == 0)
3237 goto nopol;
3238
3239 if ((flags & XFRM_LOOKUP_ICMP) &&
3240 !(pols[0]->flags & XFRM_POLICY_ICMP)) {
3241 err = -ENOENT;
3242 goto error;
3243 }
3244
3245 for (i = 0; i < num_pols; i++)
3246 WRITE_ONCE(pols[i]->curlft.use_time, ktime_get_real_seconds());
3247
3248 if (num_xfrms < 0) {
3249 /* Prohibit the flow */
3250 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
3251 err = -EPERM;
3252 goto error;
3253 } else if (num_xfrms > 0) {
3254 /* Flow transformed */
3255 dst_release(dst_orig);
3256 } else {
3257 /* Flow passes untransformed */
3258 dst_release(dst);
3259 dst = dst_orig;
3260 }
3261 ok:
3262 xfrm_pols_put(pols, drop_pols);
3263 if (dst && dst->xfrm &&
3264 dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
3265 dst->flags |= DST_XFRM_TUNNEL;
3266 return dst;
3267
3268 nopol:
3269 if ((!dst_orig->dev || !(dst_orig->dev->flags & IFF_LOOPBACK)) &&
3270 net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) {
3271 err = -EPERM;
3272 goto error;
3273 }
3274 if (!(flags & XFRM_LOOKUP_ICMP)) {
3275 dst = dst_orig;
3276 goto ok;
3277 }
3278 err = -ENOENT;
3279 error:
3280 dst_release(dst);
3281 dropdst:
3282 if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
3283 dst_release(dst_orig);
3284 xfrm_pols_put(pols, drop_pols);
3285 return ERR_PTR(err);
3286 }
3287 EXPORT_SYMBOL(xfrm_lookup_with_ifid);
3288
3289 /* Main function: finds/creates a bundle for a given flow.
3290 *
3291 * At the moment we eat a raw IP route. Mostly to speed up lookups
3292 * on interfaces with disabled IPsec.
3293 */
3294 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
3295 const struct flowi *fl, const struct sock *sk,
3296 int flags)
3297 {
3298 return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0);
3299 }
3300 EXPORT_SYMBOL(xfrm_lookup);
3301
3302 /* Callers of xfrm_lookup_route() must ensure a call to dst_output().
3303 * Otherwise we may send out blackholed packets.
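 * An -EREMOTE result here means larval drop is in effect: a blackhole
 * route is handed back so that the caller's dst_output() drops the
 * packets instead of sending them in the clear.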
3304 */
3305 struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
3306 const struct flowi *fl,
3307 const struct sock *sk, int flags)
3308 {
3309 struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
3310 flags | XFRM_LOOKUP_QUEUE |
3311 XFRM_LOOKUP_KEEP_DST_REF);
3312
3313 if (PTR_ERR(dst) == -EREMOTE)
3314 return make_blackhole(net, dst_orig->ops->family, dst_orig);
3315
3316 if (IS_ERR(dst))
3317 dst_release(dst_orig);
3318
3319 return dst;
3320 }
3321 EXPORT_SYMBOL(xfrm_lookup_route);
3322
3323 static inline int
3324 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
3325 {
3326 struct sec_path *sp = skb_sec_path(skb);
3327 struct xfrm_state *x;
3328
3329 if (!sp || idx < 0 || idx >= sp->len)
3330 return 0;
3331 x = sp->xvec[idx];
3332 if (!x->type->reject)
3333 return 0;
3334 return x->type->reject(x, skb, fl);
3335 }
3336
3337 /* When skb is transformed back to its "native" form, we have to
3338 * check policy restrictions. At the moment we do this in a maximally
3339 * stupid way. Shame on me. :-) Of course, connected sockets must
3340 * have policy cached at them.
3341 */
3342
3343 static inline int
3344 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
3345 unsigned short family, u32 if_id)
3346 {
3347 if (xfrm_state_kern(x))
3348 return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
3349 return x->id.proto == tmpl->id.proto &&
3350 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
3351 (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
3352 x->props.mode == tmpl->mode &&
3353 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
3354 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
3355 !(x->props.mode != XFRM_MODE_TRANSPORT &&
3356 xfrm_state_addr_cmp(tmpl, x, family)) &&
3357 (if_id == 0 || if_id == x->if_id);
3358 }
3359
3360 /*
3361 * 0 or more is returned when validation succeeds (either a bypass
3362 * because of an optional transport-mode template, or the next index of
3363 * the secpath state matched against the template).
3364 * -1 is returned when no matching template is found.
3365 * Otherwise "-2 - errored_index" is returned.
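 * The caller resumes the secpath scan at the returned index, which is
 * how nested tunnels are matched against the remaining entries.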
3366 */ 3367 static inline int 3368 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start, 3369 unsigned short family, u32 if_id) 3370 { 3371 int idx = start; 3372 3373 if (tmpl->optional) { 3374 if (tmpl->mode == XFRM_MODE_TRANSPORT) 3375 return start; 3376 } else 3377 start = -1; 3378 for (; idx < sp->len; idx++) { 3379 if (xfrm_state_ok(tmpl, sp->xvec[idx], family, if_id)) 3380 return ++idx; 3381 if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) { 3382 if (idx < sp->verified_cnt) { 3383 /* Secpath entry previously verified, consider optional and 3384 * continue searching 3385 */ 3386 continue; 3387 } 3388 3389 if (start == -1) 3390 start = -2-idx; 3391 break; 3392 } 3393 } 3394 return start; 3395 } 3396 3397 static void 3398 decode_session4(const struct xfrm_flow_keys *flkeys, struct flowi *fl, bool reverse) 3399 { 3400 struct flowi4 *fl4 = &fl->u.ip4; 3401 3402 memset(fl4, 0, sizeof(struct flowi4)); 3403 3404 if (reverse) { 3405 fl4->saddr = flkeys->addrs.ipv4.dst; 3406 fl4->daddr = flkeys->addrs.ipv4.src; 3407 fl4->fl4_sport = flkeys->ports.dst; 3408 fl4->fl4_dport = flkeys->ports.src; 3409 } else { 3410 fl4->saddr = flkeys->addrs.ipv4.src; 3411 fl4->daddr = flkeys->addrs.ipv4.dst; 3412 fl4->fl4_sport = flkeys->ports.src; 3413 fl4->fl4_dport = flkeys->ports.dst; 3414 } 3415 3416 switch (flkeys->basic.ip_proto) { 3417 case IPPROTO_GRE: 3418 fl4->fl4_gre_key = flkeys->gre.keyid; 3419 break; 3420 case IPPROTO_ICMP: 3421 fl4->fl4_icmp_type = flkeys->icmp.type; 3422 fl4->fl4_icmp_code = flkeys->icmp.code; 3423 break; 3424 } 3425 3426 fl4->flowi4_proto = flkeys->basic.ip_proto; 3427 fl4->flowi4_tos = flkeys->ip.tos & ~INET_ECN_MASK; 3428 } 3429 3430 #if IS_ENABLED(CONFIG_IPV6) 3431 static void 3432 decode_session6(const struct xfrm_flow_keys *flkeys, struct flowi *fl, bool reverse) 3433 { 3434 struct flowi6 *fl6 = &fl->u.ip6; 3435 3436 memset(fl6, 0, sizeof(struct flowi6)); 3437 3438 if (reverse) { 3439 fl6->saddr = flkeys->addrs.ipv6.dst; 3440 fl6->daddr = flkeys->addrs.ipv6.src; 3441 fl6->fl6_sport = flkeys->ports.dst; 3442 fl6->fl6_dport = flkeys->ports.src; 3443 } else { 3444 fl6->saddr = flkeys->addrs.ipv6.src; 3445 fl6->daddr = flkeys->addrs.ipv6.dst; 3446 fl6->fl6_sport = flkeys->ports.src; 3447 fl6->fl6_dport = flkeys->ports.dst; 3448 } 3449 3450 switch (flkeys->basic.ip_proto) { 3451 case IPPROTO_GRE: 3452 fl6->fl6_gre_key = flkeys->gre.keyid; 3453 break; 3454 case IPPROTO_ICMPV6: 3455 fl6->fl6_icmp_type = flkeys->icmp.type; 3456 fl6->fl6_icmp_code = flkeys->icmp.code; 3457 break; 3458 } 3459 3460 fl6->flowi6_proto = flkeys->basic.ip_proto; 3461 } 3462 #endif 3463 3464 int __xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl, 3465 unsigned int family, int reverse) 3466 { 3467 struct xfrm_flow_keys flkeys; 3468 3469 memset(&flkeys, 0, sizeof(flkeys)); 3470 __skb_flow_dissect(net, skb, &xfrm_session_dissector, &flkeys, 3471 NULL, 0, 0, 0, FLOW_DISSECTOR_F_STOP_AT_ENCAP); 3472 3473 switch (family) { 3474 case AF_INET: 3475 decode_session4(&flkeys, fl, reverse); 3476 break; 3477 #if IS_ENABLED(CONFIG_IPV6) 3478 case AF_INET6: 3479 decode_session6(&flkeys, fl, reverse); 3480 break; 3481 #endif 3482 default: 3483 return -EAFNOSUPPORT; 3484 } 3485 3486 fl->flowi_mark = skb->mark; 3487 if (reverse) { 3488 fl->flowi_oif = skb->skb_iif; 3489 } else { 3490 int oif = 0; 3491 3492 if (skb_dst(skb) && skb_dst(skb)->dev) 3493 oif = skb_dst(skb)->dev->ifindex; 3494 3495 fl->flowi_oif = oif; 3496 } 3497 3498 return 
security_xfrm_decode_session(skb, &fl->flowi_secid); 3499 } 3500 EXPORT_SYMBOL(__xfrm_decode_session); 3501 3502 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp) 3503 { 3504 for (; k < sp->len; k++) { 3505 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) { 3506 *idxp = k; 3507 return 1; 3508 } 3509 } 3510 3511 return 0; 3512 } 3513 3514 static bool icmp_err_packet(const struct flowi *fl, unsigned short family) 3515 { 3516 const struct flowi4 *fl4 = &fl->u.ip4; 3517 3518 if (family == AF_INET && 3519 fl4->flowi4_proto == IPPROTO_ICMP && 3520 (fl4->fl4_icmp_type == ICMP_DEST_UNREACH || 3521 fl4->fl4_icmp_type == ICMP_TIME_EXCEEDED)) 3522 return true; 3523 3524 #if IS_ENABLED(CONFIG_IPV6) 3525 if (family == AF_INET6) { 3526 const struct flowi6 *fl6 = &fl->u.ip6; 3527 3528 if (fl6->flowi6_proto == IPPROTO_ICMPV6 && 3529 (fl6->fl6_icmp_type == ICMPV6_DEST_UNREACH || 3530 fl6->fl6_icmp_type == ICMPV6_PKT_TOOBIG || 3531 fl6->fl6_icmp_type == ICMPV6_TIME_EXCEED)) 3532 return true; 3533 } 3534 #endif 3535 return false; 3536 } 3537 3538 static bool xfrm_icmp_flow_decode(struct sk_buff *skb, unsigned short family, 3539 const struct flowi *fl, struct flowi *fl1) 3540 { 3541 bool ret = true; 3542 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); 3543 int hl = family == AF_INET ? (sizeof(struct iphdr) + sizeof(struct icmphdr)) : 3544 (sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr)); 3545 3546 if (!newskb) 3547 return true; 3548 3549 if (!pskb_pull(newskb, hl)) 3550 goto out; 3551 3552 skb_reset_network_header(newskb); 3553 3554 if (xfrm_decode_session_reverse(dev_net(skb->dev), newskb, fl1, family) < 0) 3555 goto out; 3556 3557 fl1->flowi_oif = fl->flowi_oif; 3558 fl1->flowi_mark = fl->flowi_mark; 3559 fl1->flowi_tos = fl->flowi_tos; 3560 nf_nat_decode_session(newskb, fl1, family); 3561 ret = false; 3562 3563 out: 3564 consume_skb(newskb); 3565 return ret; 3566 } 3567 3568 static bool xfrm_selector_inner_icmp_match(struct sk_buff *skb, unsigned short family, 3569 const struct xfrm_selector *sel, 3570 const struct flowi *fl) 3571 { 3572 bool ret = false; 3573 3574 if (icmp_err_packet(fl, family)) { 3575 struct flowi fl1; 3576 3577 if (xfrm_icmp_flow_decode(skb, family, fl, &fl1)) 3578 return ret; 3579 3580 ret = xfrm_selector_match(sel, &fl1, family); 3581 } 3582 3583 return ret; 3584 } 3585 3586 static inline struct 3587 xfrm_policy *xfrm_in_fwd_icmp(struct sk_buff *skb, 3588 const struct flowi *fl, unsigned short family, 3589 u32 if_id) 3590 { 3591 struct xfrm_policy *pol = NULL; 3592 3593 if (icmp_err_packet(fl, family)) { 3594 struct flowi fl1; 3595 struct net *net = dev_net(skb->dev); 3596 3597 if (xfrm_icmp_flow_decode(skb, family, fl, &fl1)) 3598 return pol; 3599 3600 pol = xfrm_policy_lookup(net, &fl1, family, XFRM_POLICY_FWD, if_id); 3601 if (IS_ERR(pol)) 3602 pol = NULL; 3603 } 3604 3605 return pol; 3606 } 3607 3608 static inline struct 3609 dst_entry *xfrm_out_fwd_icmp(struct sk_buff *skb, struct flowi *fl, 3610 unsigned short family, struct dst_entry *dst) 3611 { 3612 if (icmp_err_packet(fl, family)) { 3613 struct net *net = dev_net(skb->dev); 3614 struct dst_entry *dst2; 3615 struct flowi fl1; 3616 3617 if (xfrm_icmp_flow_decode(skb, family, fl, &fl1)) 3618 return dst; 3619 3620 dst_hold(dst); 3621 3622 dst2 = xfrm_lookup(net, dst, &fl1, NULL, (XFRM_LOOKUP_QUEUE | XFRM_LOOKUP_ICMP)); 3623 3624 if (IS_ERR(dst2)) 3625 return dst; 3626 3627 if (dst2->xfrm) { 3628 dst_release(dst); 3629 dst = dst2; 3630 } else { 3631 dst_release(dst2); 3632 } 3633 } 
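/* dst now refers to the transformed route if the inner ICMP flow
 * matched a policy, otherwise to the original route. */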
3634 3635 return dst; 3636 } 3637 3638 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, 3639 unsigned short family) 3640 { 3641 struct net *net = dev_net(skb->dev); 3642 struct xfrm_policy *pol; 3643 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 3644 int npols = 0; 3645 int xfrm_nr; 3646 int pi; 3647 int reverse; 3648 struct flowi fl; 3649 int xerr_idx = -1; 3650 const struct xfrm_if_cb *ifcb; 3651 struct sec_path *sp; 3652 u32 if_id = 0; 3653 3654 rcu_read_lock(); 3655 ifcb = xfrm_if_get_cb(); 3656 3657 if (ifcb) { 3658 struct xfrm_if_decode_session_result r; 3659 3660 if (ifcb->decode_session(skb, family, &r)) { 3661 if_id = r.if_id; 3662 net = r.net; 3663 } 3664 } 3665 rcu_read_unlock(); 3666 3667 reverse = dir & ~XFRM_POLICY_MASK; 3668 dir &= XFRM_POLICY_MASK; 3669 3670 if (__xfrm_decode_session(net, skb, &fl, family, reverse) < 0) { 3671 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR); 3672 return 0; 3673 } 3674 3675 nf_nat_decode_session(skb, &fl, family); 3676 3677 /* First, check used SA against their selectors. */ 3678 sp = skb_sec_path(skb); 3679 if (sp) { 3680 int i; 3681 3682 for (i = sp->len - 1; i >= 0; i--) { 3683 struct xfrm_state *x = sp->xvec[i]; 3684 int ret = 0; 3685 3686 if (!xfrm_selector_match(&x->sel, &fl, family)) { 3687 ret = 1; 3688 if (x->props.flags & XFRM_STATE_ICMP && 3689 xfrm_selector_inner_icmp_match(skb, family, &x->sel, &fl)) 3690 ret = 0; 3691 if (ret) { 3692 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH); 3693 return 0; 3694 } 3695 } 3696 } 3697 } 3698 3699 pol = NULL; 3700 sk = sk_to_full_sk(sk); 3701 if (sk && sk->sk_policy[dir]) { 3702 pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id); 3703 if (IS_ERR(pol)) { 3704 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 3705 return 0; 3706 } 3707 } 3708 3709 if (!pol) 3710 pol = xfrm_policy_lookup(net, &fl, family, dir, if_id); 3711 3712 if (IS_ERR(pol)) { 3713 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 3714 return 0; 3715 } 3716 3717 if (!pol && dir == XFRM_POLICY_FWD) 3718 pol = xfrm_in_fwd_icmp(skb, &fl, family, if_id); 3719 3720 if (!pol) { 3721 const bool is_crypto_offload = sp && 3722 (xfrm_input_state(skb)->xso.type == XFRM_DEV_OFFLOAD_CRYPTO); 3723 3724 if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) { 3725 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS); 3726 return 0; 3727 } 3728 3729 if (sp && secpath_has_nontransport(sp, 0, &xerr_idx) && !is_crypto_offload) { 3730 xfrm_secpath_reject(xerr_idx, skb, &fl); 3731 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS); 3732 return 0; 3733 } 3734 return 1; 3735 } 3736 3737 /* This lockless write can happen from different cpus. */ 3738 WRITE_ONCE(pol->curlft.use_time, ktime_get_real_seconds()); 3739 3740 pols[0] = pol; 3741 npols++; 3742 #ifdef CONFIG_XFRM_SUB_POLICY 3743 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) { 3744 pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, 3745 &fl, family, 3746 XFRM_POLICY_IN, if_id); 3747 if (pols[1]) { 3748 if (IS_ERR(pols[1])) { 3749 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 3750 xfrm_pol_put(pols[0]); 3751 return 0; 3752 } 3753 /* This write can happen from different cpus. 
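 * WRITE_ONCE() pairs with lockless readers of curlft.use_time.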
*/ 3754 WRITE_ONCE(pols[1]->curlft.use_time, 3755 ktime_get_real_seconds()); 3756 npols++; 3757 } 3758 } 3759 #endif 3760 3761 if (pol->action == XFRM_POLICY_ALLOW) { 3762 static struct sec_path dummy; 3763 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH]; 3764 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH]; 3765 struct xfrm_tmpl **tpp = tp; 3766 int ti = 0; 3767 int i, k; 3768 3769 sp = skb_sec_path(skb); 3770 if (!sp) 3771 sp = &dummy; 3772 3773 for (pi = 0; pi < npols; pi++) { 3774 if (pols[pi] != pol && 3775 pols[pi]->action != XFRM_POLICY_ALLOW) { 3776 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK); 3777 goto reject; 3778 } 3779 if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) { 3780 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR); 3781 goto reject_error; 3782 } 3783 for (i = 0; i < pols[pi]->xfrm_nr; i++) 3784 tpp[ti++] = &pols[pi]->xfrm_vec[i]; 3785 } 3786 xfrm_nr = ti; 3787 3788 if (npols > 1) { 3789 xfrm_tmpl_sort(stp, tpp, xfrm_nr, family); 3790 tpp = stp; 3791 } 3792 3793 /* For each tunnel xfrm, find the first matching tmpl. 3794 * For each tmpl before that, find corresponding xfrm. 3795 * Order is _important_. Later we will implement 3796 * some barriers, but at the moment barriers 3797 * are implied between each two transformations. 3798 * Upon success, marks secpath entries as having been 3799 * verified to allow them to be skipped in future policy 3800 * checks (e.g. nested tunnels). 3801 */ 3802 for (i = xfrm_nr-1, k = 0; i >= 0; i--) { 3803 k = xfrm_policy_ok(tpp[i], sp, k, family, if_id); 3804 if (k < 0) { 3805 if (k < -1) 3806 /* "-2 - errored_index" returned */ 3807 xerr_idx = -(2+k); 3808 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH); 3809 goto reject; 3810 } 3811 } 3812 3813 if (secpath_has_nontransport(sp, k, &xerr_idx)) { 3814 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH); 3815 goto reject; 3816 } 3817 3818 xfrm_pols_put(pols, npols); 3819 sp->verified_cnt = k; 3820 3821 return 1; 3822 } 3823 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK); 3824 3825 reject: 3826 xfrm_secpath_reject(xerr_idx, skb, &fl); 3827 reject_error: 3828 xfrm_pols_put(pols, npols); 3829 return 0; 3830 } 3831 EXPORT_SYMBOL(__xfrm_policy_check); 3832 3833 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family) 3834 { 3835 struct net *net = dev_net(skb->dev); 3836 struct flowi fl; 3837 struct dst_entry *dst; 3838 int res = 1; 3839 3840 if (xfrm_decode_session(net, skb, &fl, family) < 0) { 3841 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR); 3842 return 0; 3843 } 3844 3845 skb_dst_force(skb); 3846 if (!skb_dst(skb)) { 3847 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR); 3848 return 0; 3849 } 3850 3851 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE); 3852 if (IS_ERR(dst)) { 3853 res = 0; 3854 dst = NULL; 3855 } 3856 3857 if (dst && !dst->xfrm) 3858 dst = xfrm_out_fwd_icmp(skb, &fl, family, dst); 3859 3860 skb_dst_set(skb, dst); 3861 return res; 3862 } 3863 EXPORT_SYMBOL(__xfrm_route_forward); 3864 3865 /* Optimize later using cookies and generation ids. */ 3866 3867 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie) 3868 { 3869 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete 3870 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to 3871 * get validated by dst_ops->check on every use. We do this 3872 * because when a normal route referenced by an XFRM dst is 3873 * obsoleted we do not go looking around for all parent 3874 * referencing XFRM dsts so that we can invalidate them. It 3875 * is just too much work. 
Instead we perform the checks here on
3876 * every use. For example:
3877 *
3878 * XFRM dst A --> IPv4 dst X
3879 *
3880 * X is the "xdst->route" of A (X is also the "dst->path" of A
3881 * in this example). If X is marked obsolete, "A" will not
3882 * notice. That's what we are validating here via the
3883 * stale_bundle() check.
3884 *
3885 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will
3886 * be marked on it.
3887 * This will force stale_bundle() to fail on any xdst bundle with
3888 * this dst linked in it.
3889 */
3890 if (dst->obsolete < 0 && !stale_bundle(dst))
3891 return dst;
3892
3893 return NULL;
3894 }
3895
3896 static int stale_bundle(struct dst_entry *dst)
3897 {
3898 return !xfrm_bundle_ok((struct xfrm_dst *)dst);
3899 }
3900
3901 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
3902 {
3903 while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) {
3904 dst->dev = blackhole_netdev;
3905 dev_hold(dst->dev);
3906 dev_put(dev);
3907 }
3908 }
3909 EXPORT_SYMBOL(xfrm_dst_ifdown);
3910
3911 static void xfrm_link_failure(struct sk_buff *skb)
3912 {
3913 /* Impossible. Such a dst must be popped before it reaches the point of failure. */
3914 }
3915
3916 static void xfrm_negative_advice(struct sock *sk, struct dst_entry *dst)
3917 {
3918 if (dst->obsolete)
3919 sk_dst_reset(sk);
3920 }
3921
3922 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
3923 {
3924 while (nr--) {
3925 struct xfrm_dst *xdst = bundle[nr];
3926 u32 pmtu, route_mtu_cached;
3927 struct dst_entry *dst;
3928
3929 dst = &xdst->u.dst;
3930 pmtu = dst_mtu(xfrm_dst_child(dst));
3931 xdst->child_mtu_cached = pmtu;
3932
3933 pmtu = xfrm_state_mtu(dst->xfrm, pmtu);
3934
3935 route_mtu_cached = dst_mtu(xdst->route);
3936 xdst->route_mtu_cached = route_mtu_cached;
3937
3938 if (pmtu > route_mtu_cached)
3939 pmtu = route_mtu_cached;
3940
3941 dst_metric_set(dst, RTAX_MTU, pmtu);
3942 }
3943 }
3944
3945 /* Check that the bundle accepts the flow and that its components are
3946 * still valid.
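 *
 * Every xfrm_dst in the chain is checked: state validity, policy and
 * state generation ids, and the cached child/route MTUs. Any mismatch
 * marks the bundle stale.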
3947 */ 3948 3949 static int xfrm_bundle_ok(struct xfrm_dst *first) 3950 { 3951 struct xfrm_dst *bundle[XFRM_MAX_DEPTH]; 3952 struct dst_entry *dst = &first->u.dst; 3953 struct xfrm_dst *xdst; 3954 int start_from, nr; 3955 u32 mtu; 3956 3957 if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) || 3958 (dst->dev && !netif_running(dst->dev))) 3959 return 0; 3960 3961 if (dst->flags & DST_XFRM_QUEUE) 3962 return 1; 3963 3964 start_from = nr = 0; 3965 do { 3966 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 3967 3968 if (dst->xfrm->km.state != XFRM_STATE_VALID) 3969 return 0; 3970 if (xdst->xfrm_genid != dst->xfrm->genid) 3971 return 0; 3972 if (xdst->num_pols > 0 && 3973 xdst->policy_genid != atomic_read(&xdst->pols[0]->genid)) 3974 return 0; 3975 3976 bundle[nr++] = xdst; 3977 3978 mtu = dst_mtu(xfrm_dst_child(dst)); 3979 if (xdst->child_mtu_cached != mtu) { 3980 start_from = nr; 3981 xdst->child_mtu_cached = mtu; 3982 } 3983 3984 if (!dst_check(xdst->route, xdst->route_cookie)) 3985 return 0; 3986 mtu = dst_mtu(xdst->route); 3987 if (xdst->route_mtu_cached != mtu) { 3988 start_from = nr; 3989 xdst->route_mtu_cached = mtu; 3990 } 3991 3992 dst = xfrm_dst_child(dst); 3993 } while (dst->xfrm); 3994 3995 if (likely(!start_from)) 3996 return 1; 3997 3998 xdst = bundle[start_from - 1]; 3999 mtu = xdst->child_mtu_cached; 4000 while (start_from--) { 4001 dst = &xdst->u.dst; 4002 4003 mtu = xfrm_state_mtu(dst->xfrm, mtu); 4004 if (mtu > xdst->route_mtu_cached) 4005 mtu = xdst->route_mtu_cached; 4006 dst_metric_set(dst, RTAX_MTU, mtu); 4007 if (!start_from) 4008 break; 4009 4010 xdst = bundle[start_from - 1]; 4011 xdst->child_mtu_cached = mtu; 4012 } 4013 4014 return 1; 4015 } 4016 4017 static unsigned int xfrm_default_advmss(const struct dst_entry *dst) 4018 { 4019 return dst_metric_advmss(xfrm_dst_path(dst)); 4020 } 4021 4022 static unsigned int xfrm_mtu(const struct dst_entry *dst) 4023 { 4024 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); 4025 4026 return mtu ? 
: dst_mtu(xfrm_dst_path(dst)); 4027 } 4028 4029 static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst, 4030 const void *daddr) 4031 { 4032 while (dst->xfrm) { 4033 const struct xfrm_state *xfrm = dst->xfrm; 4034 4035 dst = xfrm_dst_child(dst); 4036 4037 if (xfrm->props.mode == XFRM_MODE_TRANSPORT) 4038 continue; 4039 if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR) 4040 daddr = xfrm->coaddr; 4041 else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR)) 4042 daddr = &xfrm->id.daddr; 4043 } 4044 return daddr; 4045 } 4046 4047 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, 4048 struct sk_buff *skb, 4049 const void *daddr) 4050 { 4051 const struct dst_entry *path = xfrm_dst_path(dst); 4052 4053 if (!skb) 4054 daddr = xfrm_get_dst_nexthop(dst, daddr); 4055 return path->ops->neigh_lookup(path, skb, daddr); 4056 } 4057 4058 static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr) 4059 { 4060 const struct dst_entry *path = xfrm_dst_path(dst); 4061 4062 daddr = xfrm_get_dst_nexthop(dst, daddr); 4063 path->ops->confirm_neigh(path, daddr); 4064 } 4065 4066 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family) 4067 { 4068 int err = 0; 4069 4070 if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo))) 4071 return -EAFNOSUPPORT; 4072 4073 spin_lock(&xfrm_policy_afinfo_lock); 4074 if (unlikely(xfrm_policy_afinfo[family] != NULL)) 4075 err = -EEXIST; 4076 else { 4077 struct dst_ops *dst_ops = afinfo->dst_ops; 4078 if (likely(dst_ops->kmem_cachep == NULL)) 4079 dst_ops->kmem_cachep = xfrm_dst_cache; 4080 if (likely(dst_ops->check == NULL)) 4081 dst_ops->check = xfrm_dst_check; 4082 if (likely(dst_ops->default_advmss == NULL)) 4083 dst_ops->default_advmss = xfrm_default_advmss; 4084 if (likely(dst_ops->mtu == NULL)) 4085 dst_ops->mtu = xfrm_mtu; 4086 if (likely(dst_ops->negative_advice == NULL)) 4087 dst_ops->negative_advice = xfrm_negative_advice; 4088 if (likely(dst_ops->link_failure == NULL)) 4089 dst_ops->link_failure = xfrm_link_failure; 4090 if (likely(dst_ops->neigh_lookup == NULL)) 4091 dst_ops->neigh_lookup = xfrm_neigh_lookup; 4092 if (likely(!dst_ops->confirm_neigh)) 4093 dst_ops->confirm_neigh = xfrm_confirm_neigh; 4094 rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo); 4095 } 4096 spin_unlock(&xfrm_policy_afinfo_lock); 4097 4098 return err; 4099 } 4100 EXPORT_SYMBOL(xfrm_policy_register_afinfo); 4101 4102 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo) 4103 { 4104 struct dst_ops *dst_ops = afinfo->dst_ops; 4105 int i; 4106 4107 for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) { 4108 if (xfrm_policy_afinfo[i] != afinfo) 4109 continue; 4110 RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL); 4111 break; 4112 } 4113 4114 synchronize_rcu(); 4115 4116 dst_ops->kmem_cachep = NULL; 4117 dst_ops->check = NULL; 4118 dst_ops->negative_advice = NULL; 4119 dst_ops->link_failure = NULL; 4120 } 4121 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); 4122 4123 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb) 4124 { 4125 spin_lock(&xfrm_if_cb_lock); 4126 rcu_assign_pointer(xfrm_if_cb, ifcb); 4127 spin_unlock(&xfrm_if_cb_lock); 4128 } 4129 EXPORT_SYMBOL(xfrm_if_register_cb); 4130 4131 void xfrm_if_unregister_cb(void) 4132 { 4133 RCU_INIT_POINTER(xfrm_if_cb, NULL); 4134 synchronize_rcu(); 4135 } 4136 EXPORT_SYMBOL(xfrm_if_unregister_cb); 4137 4138 #ifdef CONFIG_XFRM_STATISTICS 4139 static int __net_init xfrm_statistics_init(struct net *net) 4140 { 4141 int rv; 4142 net->mib.xfrm_statistics = 
alloc_percpu(struct linux_xfrm_mib); 4143 if (!net->mib.xfrm_statistics) 4144 return -ENOMEM; 4145 rv = xfrm_proc_init(net); 4146 if (rv < 0) 4147 free_percpu(net->mib.xfrm_statistics); 4148 return rv; 4149 } 4150 4151 static void xfrm_statistics_fini(struct net *net) 4152 { 4153 xfrm_proc_fini(net); 4154 free_percpu(net->mib.xfrm_statistics); 4155 } 4156 #else 4157 static int __net_init xfrm_statistics_init(struct net *net) 4158 { 4159 return 0; 4160 } 4161 4162 static void xfrm_statistics_fini(struct net *net) 4163 { 4164 } 4165 #endif 4166 4167 static int __net_init xfrm_policy_init(struct net *net) 4168 { 4169 unsigned int hmask, sz; 4170 int dir, err; 4171 4172 if (net_eq(net, &init_net)) { 4173 xfrm_dst_cache = KMEM_CACHE(xfrm_dst, SLAB_HWCACHE_ALIGN | SLAB_PANIC); 4174 err = rhashtable_init(&xfrm_policy_inexact_table, 4175 &xfrm_pol_inexact_params); 4176 BUG_ON(err); 4177 } 4178 4179 hmask = 8 - 1; 4180 sz = (hmask+1) * sizeof(struct hlist_head); 4181 4182 net->xfrm.policy_byidx = xfrm_hash_alloc(sz); 4183 if (!net->xfrm.policy_byidx) 4184 goto out_byidx; 4185 net->xfrm.policy_idx_hmask = hmask; 4186 4187 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { 4188 struct xfrm_policy_hash *htab; 4189 4190 net->xfrm.policy_count[dir] = 0; 4191 net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0; 4192 INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]); 4193 4194 htab = &net->xfrm.policy_bydst[dir]; 4195 htab->table = xfrm_hash_alloc(sz); 4196 if (!htab->table) 4197 goto out_bydst; 4198 htab->hmask = hmask; 4199 htab->dbits4 = 32; 4200 htab->sbits4 = 32; 4201 htab->dbits6 = 128; 4202 htab->sbits6 = 128; 4203 } 4204 net->xfrm.policy_hthresh.lbits4 = 32; 4205 net->xfrm.policy_hthresh.rbits4 = 32; 4206 net->xfrm.policy_hthresh.lbits6 = 128; 4207 net->xfrm.policy_hthresh.rbits6 = 128; 4208 4209 seqlock_init(&net->xfrm.policy_hthresh.lock); 4210 4211 INIT_LIST_HEAD(&net->xfrm.policy_all); 4212 INIT_LIST_HEAD(&net->xfrm.inexact_bins); 4213 INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize); 4214 INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild); 4215 return 0; 4216 4217 out_bydst: 4218 for (dir--; dir >= 0; dir--) { 4219 struct xfrm_policy_hash *htab; 4220 4221 htab = &net->xfrm.policy_bydst[dir]; 4222 xfrm_hash_free(htab->table, sz); 4223 } 4224 xfrm_hash_free(net->xfrm.policy_byidx, sz); 4225 out_byidx: 4226 return -ENOMEM; 4227 } 4228 4229 static void xfrm_policy_fini(struct net *net) 4230 { 4231 struct xfrm_pol_inexact_bin *b, *t; 4232 unsigned int sz; 4233 int dir; 4234 4235 flush_work(&net->xfrm.policy_hash_work); 4236 #ifdef CONFIG_XFRM_SUB_POLICY 4237 xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false); 4238 #endif 4239 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false); 4240 4241 WARN_ON(!list_empty(&net->xfrm.policy_all)); 4242 4243 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { 4244 struct xfrm_policy_hash *htab; 4245 4246 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir])); 4247 4248 htab = &net->xfrm.policy_bydst[dir]; 4249 sz = (htab->hmask + 1) * sizeof(struct hlist_head); 4250 WARN_ON(!hlist_empty(htab->table)); 4251 xfrm_hash_free(htab->table, sz); 4252 } 4253 4254 sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head); 4255 WARN_ON(!hlist_empty(net->xfrm.policy_byidx)); 4256 xfrm_hash_free(net->xfrm.policy_byidx, sz); 4257 4258 spin_lock_bh(&net->xfrm.xfrm_policy_lock); 4259 list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins) 4260 __xfrm_policy_inexact_prune_bin(b, true); 4261 spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 4262 } 4263 4264 
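/* Per-netns init: locks and default policies first, then statistics,
 * xfrm_state, the policy tables, sysctls and NAT keepalive; failures
 * unwind in reverse order. */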
static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	/* Initialize the per-net locks here */
	spin_lock_init(&net->xfrm.xfrm_state_lock);
	spin_lock_init(&net->xfrm.xfrm_policy_lock);
	seqcount_spinlock_init(&net->xfrm.xfrm_policy_hash_generation, &net->xfrm.xfrm_policy_lock);
	mutex_init(&net->xfrm.xfrm_cfg_mutex);
	net->xfrm.policy_default[XFRM_POLICY_IN] = XFRM_USERPOLICY_ACCEPT;
	net->xfrm.policy_default[XFRM_POLICY_FWD] = XFRM_USERPOLICY_ACCEPT;
	net->xfrm.policy_default[XFRM_POLICY_OUT] = XFRM_USERPOLICY_ACCEPT;

	rv = xfrm_statistics_init(net);
	if (rv < 0)
		goto out_statistics;
	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	rv = xfrm_sysctl_init(net);
	if (rv < 0)
		goto out_sysctl;

	rv = xfrm_nat_keepalive_net_init(net);
	if (rv < 0)
		goto out_nat_keepalive;

	return 0;

out_nat_keepalive:
	xfrm_sysctl_fini(net);
out_sysctl:
	xfrm_policy_fini(net);
out_policy:
	xfrm_state_fini(net);
out_state:
	xfrm_statistics_fini(net);
out_statistics:
	return rv;
}

static void __net_exit xfrm_net_exit(struct net *net)
{
	xfrm_nat_keepalive_net_fini(net);
	xfrm_sysctl_fini(net);
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
	xfrm_statistics_fini(net);
}

static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};

static const struct flow_dissector_key xfrm_flow_dissector_keys[] = {
	{
		.key_id = FLOW_DISSECTOR_KEY_CONTROL,
		.offset = offsetof(struct xfrm_flow_keys, control),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_BASIC,
		.offset = offsetof(struct xfrm_flow_keys, basic),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
		.offset = offsetof(struct xfrm_flow_keys, addrs.ipv4),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
		.offset = offsetof(struct xfrm_flow_keys, addrs.ipv6),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_PORTS,
		.offset = offsetof(struct xfrm_flow_keys, ports),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_GRE_KEYID,
		.offset = offsetof(struct xfrm_flow_keys, gre),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_IP,
		.offset = offsetof(struct xfrm_flow_keys, ip),
	},
	{
		.key_id = FLOW_DISSECTOR_KEY_ICMP,
		.offset = offsetof(struct xfrm_flow_keys, icmp),
	},
};

void __init xfrm_init(void)
{
	skb_flow_dissector_init(&xfrm_session_dissector,
				xfrm_flow_dissector_keys,
				ARRAY_SIZE(xfrm_flow_dissector_keys));

	register_pernet_subsys(&xfrm_net_ops);
	xfrm_dev_init();
	xfrm_input_init();

#ifdef CONFIG_XFRM_ESPINTCP
	espintcp_init();
#endif

	register_xfrm_state_bpf();
	xfrm_nat_keepalive_init(AF_INET);
}
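
/* Lifecycle note: register_pernet_subsys() runs xfrm_net_init() for
 * init_net and for every network namespace created afterwards (e.g.
 * via unshare(CLONE_NEWNET)), and xfrm_net_exit() when a namespace is
 * torn down.  If any init step fails, the gotos above unwind only the
 * steps that already succeeded, in reverse order, so a half-initialized
 * namespace is never left behind.  (Call-site assumption: xfrm_init()
 * itself is invoked once during IPv4 boot, from ip_rt_init().)
 */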
#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}

void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif
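
/* Example (illustrative; field layout partly assumed and values made
 * up): with auditing enabled, adding an SPD entry emits a
 * MAC_IPSEC_EVENT record assembled from the pieces above, roughly:
 *
 *	type=MAC_IPSEC_EVENT msg=audit(...): op=SPD-add auid=1000
 *	ses=2 res=1 src=10.0.1.0 src_prefixlen=24 dst=10.0.2.5
 *
 * The prefixlen fields appear only when the selector does not cover
 * exactly one host, i.e. a prefix length other than 32 for IPv4 or
 * 128 for IPv6.
 */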
#ifdef CONFIG_XFRM_MIGRATE
static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
					const struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
				    sel_cmp->family) &&
		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
				    sel_cmp->family) &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return true;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return true;
		}
	}
	return false;
}

static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
						    u8 dir, u8 type, struct net *net, u32 if_id)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;
	u32 priority = ~0U;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, chain, bydst) {
		if ((if_id == 0 || pol->if_id == if_id) &&
		    xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		if ((pol->priority >= priority) && ret)
			break;

		if ((if_id == 0 || pol->if_id == if_id) &&
		    xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			break;
		}
	}

	xfrm_pol_hold(ret);

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	return ret;
}

static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
					    m->old_family) &&
			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
					    m->old_family)) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* in case of transport mode, template does not store
			 * any IP addresses, hence we just compare mode and
			 * protocol
			 */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}

/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate,
			       struct netlink_ext_ack *extack)
{
	struct xfrm_migrate *mp;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		NL_SET_ERR_MSG(extack, "Target policy not found");
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			atomic_inc(&pol->genid);
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}

static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate,
			      struct netlink_ext_ack *extack)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH) {
		NL_SET_ERR_MSG(extack, "Invalid number of SAs to migrate, must be 0 < num <= XFRM_MAX_DEPTH (6)");
		return -EINVAL;
	}

	for (i = 0; i < num_migrate; i++) {
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family)) {
			NL_SET_ERR_MSG(extack, "Addresses in the MIGRATE attribute's list cannot be null");
			return -EINVAL;
		}

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family) {
				NL_SET_ERR_MSG(extack, "Entries in the MIGRATE attribute's list must be unique");
				return -EINVAL;
			}
		}
	}

	return 0;
}
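
/* Example (illustrative, hypothetical values): a key manager migrating
 * one tunnel-mode SA from gateway 192.0.2.1 to 198.51.100.1 would pass
 * a single-entry array shaped like this (reqid == 0 means "match any
 * reqid"; the old_* address fields hold the current endpoints, the
 * new_* fields the endpoints after movement):
 *
 *	struct xfrm_migrate m = {
 *		.proto      = IPPROTO_ESP,
 *		.mode       = XFRM_MODE_TUNNEL,
 *		.reqid      = 0,
 *		.old_family = AF_INET,
 *		.new_family = AF_INET,
 *	};
 *
 * xfrm_migrate_check() above rejects zero ("any") new addresses and
 * duplicate (old addresses, proto, mode, reqid, family) tuples.
 */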
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k, struct net *net,
		 struct xfrm_encap_tmpl *encap, u32 if_id,
		 struct netlink_ext_ack *extack)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	/* Stage 0 - sanity checks */
	err = xfrm_migrate_check(m, num_migrate, extack);
	if (err < 0)
		goto out;

	if (dir >= XFRM_POLICY_MAX) {
		NL_SET_ERR_MSG(extack, "Invalid policy direction");
		err = -EINVAL;
		goto out;
	}

	/* Stage 1 - find policy */
	pol = xfrm_migrate_policy_find(sel, dir, type, net, if_id);
	if (!pol) {
		NL_SET_ERR_MSG(extack, "Target policy not found");
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp, net, if_id))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			xc = xfrm_state_migrate(x, mp, encap);
			if (xc) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	err = xfrm_policy_migrate(pol, m, num_migrate, extack);
	if (err < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k, encap);

	xfrm_pol_put(pol);

	return 0;
out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif
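
/* Caller sketch (assumption, simplified): migration requests normally
 * arrive from user space, either as an XFRM_MSG_MIGRATE netlink message
 * handled in net/xfrm/xfrm_user.c or through the PF_KEY interface, and
 * end up here roughly as:
 *
 *	err = xfrm_migrate(&sel, dir, XFRM_POLICY_TYPE_MAIN,
 *			   m, num_migrate, kmaddr, net, encap,
 *			   if_id, extack);
 *
 * On success the policy templates and the matching SAs reference the
 * new endpoints, the old SAs have been deleted, and registered key
 * managers have been notified via km_migrate().
 */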