/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/cpu.h>
#include <linux/audit.h>
#include <linux/rhashtable.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/xfrm.h>
#include <net/ip.h>
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif

#include "xfrm_hash.h"

#define XFRM_QUEUE_TMO_MIN	((unsigned)(HZ/10))
#define XFRM_QUEUE_TMO_MAX	((unsigned)(60*HZ))
#define XFRM_MAX_QUEUE_LEN	100

struct xfrm_flo {
	struct dst_entry *dst_orig;
	u8 flags;
};

/* prefixes smaller than this are stored in lists, not trees. */
#define INEXACT_PREFIXLEN_IPV4	16
#define INEXACT_PREFIXLEN_IPV6	48

struct xfrm_pol_inexact_node {
	struct rb_node node;
	union {
		xfrm_address_t addr;
		struct rcu_head rcu;
	};
	u8 prefixlen;

	struct rb_root root;

	/* the policies matching this node, can be empty list */
	struct hlist_head hhead;
};

/* xfrm inexact policy search tree:
 * xfrm_pol_inexact_bin = hash(dir,type,family,if_id);
 *  |
 * +---- root_d: sorted by daddr:prefix
 * |                 |
 * |        xfrm_pol_inexact_node
 * |                 |
 * |                 +- root: sorted by saddr/prefix
 * |                 |        |
 * |                 |  xfrm_pol_inexact_node
 * |                 |        |
 * |                 |        + root: unused
 * |                 |        |
 * |                 |        + hhead: saddr:daddr policies
 * |                 |
 * |                 +- coarse policies and all any:daddr policies
 * |
 * +---- root_s: sorted by saddr:prefix
 * |                 |
 * |        xfrm_pol_inexact_node
 * |                 |
 * |                 + root: unused
 * |                 |
 * |                 + hhead: saddr:any policies
 * |
 * +---- coarse policies and all any:any policies
 *
 * Lookups return four candidate lists:
 * 1. any:any list from top-level xfrm_pol_inexact_bin
 * 2. any:daddr list from daddr tree
 * 3. saddr:daddr list from 2nd level daddr tree
 * 4. saddr:any list from saddr tree
 *
 * This result set then needs to be searched for the policy with
 * the lowest priority.  If two results have the same priority, the
 * youngest one wins.
 */
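
/* Example (editor's illustration, not from the original source): if an
 * OUT/main bin holds the policies
 *
 *	A: 10.0.0.0/8 -> 192.168.0.0/16
 *	B: any        -> 192.168.0.0/16
 *	C: 10.0.0.0/8 -> any
 *	D: any        -> any
 *
 * then a lookup for saddr 10.1.2.3 / daddr 192.168.1.1 collects D from the
 * bin's own hhead, B from the matching daddr tree node, A from that node's
 * saddr subtree and C from the saddr tree; the four lists are then scanned
 * for the lowest-priority (ties: youngest) match.
 */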

struct xfrm_pol_inexact_key {
	possible_net_t net;
	u32 if_id;
	u16 family;
	u8 dir, type;
};

struct xfrm_pol_inexact_bin {
	struct xfrm_pol_inexact_key k;
	struct rhash_head head;
	/* list containing '*:*' policies */
	struct hlist_head hhead;

	seqcount_t count;
	/* tree sorted by daddr/prefix */
	struct rb_root root_d;

	/* tree sorted by saddr/prefix */
	struct rb_root root_s;

	/* slow path below */
	struct list_head inexact_bins;
	struct rcu_head rcu;
};

enum xfrm_pol_inexact_candidate_type {
	XFRM_POL_CAND_BOTH,
	XFRM_POL_CAND_SADDR,
	XFRM_POL_CAND_DADDR,
	XFRM_POL_CAND_ANY,

	XFRM_POL_CAND_MAX,
};

struct xfrm_pol_inexact_candidates {
	struct hlist_head *res[XFRM_POL_CAND_MAX];
};

static DEFINE_SPINLOCK(xfrm_if_cb_lock);
static struct xfrm_if_cb const __rcu *xfrm_if_cb __read_mostly;

static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo const __rcu *xfrm_policy_afinfo[AF_INET6 + 1]
						__read_mostly;

static struct kmem_cache *xfrm_dst_cache __ro_after_init;
static __read_mostly seqcount_t xfrm_policy_hash_generation;

static struct rhashtable xfrm_policy_inexact_table;
static const struct rhashtable_params xfrm_pol_inexact_params;

static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static void xfrm_policy_queue_process(struct timer_list *t);

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family, u8 dir,
			   u32 if_id);

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup_rcu(struct net *net,
			       u8 type, u16 family, u8 dir, u32 if_id);
static struct xfrm_policy *
xfrm_policy_insert_list(struct hlist_head *chain, struct xfrm_policy *policy,
			bool excl);
static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy);

static bool
xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
				    struct xfrm_pol_inexact_bin *b,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr);

static inline bool xfrm_pol_hold_rcu(struct xfrm_policy *policy)
{
	return refcount_inc_not_zero(&policy->refcnt);
}
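
/* Editor's note: the selector helpers below match ports with
 * !((port ^ sel->port) & sel->port_mask).  The XOR yields the bits that
 * differ; the mask keeps only the bits the selector cares about.  E.g. a
 * selector with dport 80 and dport_mask 0xffff only matches flows whose
 * destination port is exactly 80, while a dport_mask of 0 matches any port.
 */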

static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}

static inline bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}

bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
			 unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return false;
}

/* Returns the afinfo with the RCU read lock held on success; the caller
 * must drop it with rcu_read_unlock() when done.
 */
static const struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	const struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= ARRAY_SIZE(xfrm_policy_afinfo)))
		return NULL;
	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

/* Called with rcu_read_lock(). */
static const struct xfrm_if_cb *xfrm_if_get_cb(void)
{
	return rcu_dereference(xfrm_if_cb);
}

struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr,
				    int family, u32 mark)
{
	const struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);

	rcu_read_unlock();

	return dst;
}
EXPORT_SYMBOL(__xfrm_dst_lookup);

static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
						int tos, int oif,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family, u32 mark)
{
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		saddr = x->coaddr;
		daddr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		saddr = prev_saddr;
		daddr = x->coaddr;
	}

	dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
	}

	return dst;
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
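
/* Editor's note: make_jiffies() clamps the seconds-to-jiffies conversion so
 * large lifetimes cannot overflow or exceed what the timer core accepts.
 * E.g. make_jiffies(60) == 60*HZ, while any value at or above
 * (MAX_SCHEDULE_TIMEOUT-1)/HZ saturates at MAX_SCHEDULE_TIMEOUT - 1.
 */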

static void xfrm_policy_timer(struct timer_list *t)
{
	struct xfrm_policy *xp = from_timer(xp, t, timer);
	time64_t now = ktime_get_real_seconds();
	time64_t next = TIME64_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (unlikely(xp->walk.dead))
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		time64_t tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		time64_t tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		time64_t tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		time64_t tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != TIME64_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}

/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst_inexact_list);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		refcount_set(&policy->refcnt, 1);
		skb_queue_head_init(&policy->polq.hold_queue);
		timer_setup(&policy->timer, xfrm_policy_timer, 0);
		timer_setup(&policy->polq.hold_timer,
			    xfrm_policy_queue_process, 0);
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

static void xfrm_policy_destroy_rcu(struct rcu_head *head)
{
	struct xfrm_policy *policy = container_of(head, struct xfrm_policy, rcu);

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}

/* Destroy xfrm_policy: descendant resources must have been released by
 * this point.
 */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);

	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
		BUG();

	call_rcu(&policy->rcu, xfrm_policy_destroy_rcu);
}
EXPORT_SYMBOL(xfrm_policy_destroy);
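
/* Editor's note on the timer/refcount convention used throughout this file:
 * a pending timer holds a reference on its policy.  mod_timer() returns 0
 * when the timer was not already pending, so
 * "if (!mod_timer(...)) xfrm_pol_hold(...)" takes the reference exactly once
 * when arming; conversely, a successful del_timer() means a pending timer
 * was cancelled, so its reference is dropped with xfrm_pol_put().
 */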

/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must already be unlinked from the lists
 * at this point.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	policy->walk.dead = 1;

	atomic_inc(&policy->genid);

	if (del_timer(&policy->polq.hold_timer))
		xfrm_pol_put(policy);
	skb_queue_purge(&policy->polq.hold_queue);

	if (del_timer(&policy->timer))
		xfrm_pol_put(policy);

	xfrm_pol_put(policy);
}

static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}

/* calculate policy hash thresholds */
static void __get_hash_thresh(struct net *net,
			      unsigned short family, int dir,
			      u8 *dbits, u8 *sbits)
{
	switch (family) {
	case AF_INET:
		*dbits = net->xfrm.policy_bydst[dir].dbits4;
		*sbits = net->xfrm.policy_bydst[dir].sbits4;
		break;

	case AF_INET6:
		*dbits = net->xfrm.policy_bydst[dir].dbits6;
		*sbits = net->xfrm.policy_bydst[dir].sbits6;
		break;

	default:
		*dbits = 0;
		*sbits = 0;
	}
}

static struct hlist_head *policy_hash_bysel(struct net *net,
					    const struct xfrm_selector *sel,
					    unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __sel_hash(sel, family, hmask, dbits, sbits);

	/* __sel_hash() returns hmask + 1 when the selector prefixes are
	 * below the hashing thresholds; such policies go to the inexact
	 * lists instead.
	 */
	if (hash == hmask + 1)
		return NULL;

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}

static struct hlist_head *policy_hash_direct(struct net *net,
					     const xfrm_address_t *daddr,
					     const xfrm_address_t *saddr,
					     unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);

	return rcu_dereference_check(net->xfrm.policy_bydst[dir].table,
		     lockdep_is_held(&net->xfrm.xfrm_policy_lock)) + hash;
}

static void xfrm_dst_hash_transfer(struct net *net,
				   struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask,
				   int dir)
{
	struct hlist_node *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;
	u8 dbits;
	u8 sbits;

redo:
	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
		unsigned int h;

		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask, dbits, sbits);
		if (!entry0) {
			hlist_del_rcu(&pol->bydst);
			hlist_add_head_rcu(&pol->bydst, ndsttable + h);
			h0 = h;
		} else {
			if (h != h0)
				continue;
			hlist_del_rcu(&pol->bydst);
			hlist_add_behind_rcu(&pol->bydst, entry0);
		}
		entry0 = &pol->bydst;
	}
	if (!hlist_empty(list)) {
		entry0 = NULL;
		goto redo;
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}
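
/* Editor's note: xfrm_dst_hash_transfer() above rehashes one source bucket.
 * The first entry moved to a new bucket is remembered in entry0; subsequent
 * entries hashing to the same bucket are added behind it with
 * hlist_add_behind_rcu(), so the original relative (priority) order within a
 * bucket is preserved.  Entries hashing elsewhere stay on the source list
 * and are picked up by the redo pass.
 */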

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}

static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	struct hlist_head *odst;
	int i;

	if (!ndst)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	write_seqcount_begin(&xfrm_policy_hash_generation);

	odst = rcu_dereference_protected(net->xfrm.policy_bydst[dir].table,
					 lockdep_is_held(&net->xfrm.xfrm_policy_lock));

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);

	rcu_assign_pointer(net->xfrm.policy_bydst[dir].table, ndst);
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_seqcount_end(&xfrm_policy_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	synchronize_rcu();

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = net->xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
}
EXPORT_SYMBOL(xfrm_spd_getinfo);
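
/* Editor's note: a per-direction table is grown once it holds more entries
 * than buckets (cnt > hmask) and the doubled size would still be within
 * xfrm_policy_hashmax.  E.g. a table with hmask 0xf (16 buckets) and 17
 * policies is resized to hmask 0x1f (32 buckets) via xfrm_new_hash_mask().
 */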

static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	}
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net, total);

	mutex_unlock(&hash_resize_mutex);
}

static void xfrm_hash_reset_inexact_table(struct net *net)
{
	struct xfrm_pol_inexact_bin *b;

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	list_for_each_entry(b, &net->xfrm.inexact_bins, inexact_bins)
		INIT_HLIST_HEAD(&b->hhead);
}

/* Make sure *pol can be inserted into fastbin.
 * Useful to check that later insert requests will be successful
 * (provided xfrm_policy_lock is held throughout).
 */
static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_alloc_bin(const struct xfrm_policy *pol, u8 dir)
{
	struct xfrm_pol_inexact_bin *bin, *prev;
	struct xfrm_pol_inexact_key k = {
		.family = pol->family,
		.type = pol->type,
		.dir = dir,
		.if_id = pol->if_id,
	};
	struct net *net = xp_net(pol);

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	write_pnet(&k.net, net);
	bin = rhashtable_lookup_fast(&xfrm_policy_inexact_table, &k,
				     xfrm_pol_inexact_params);
	if (bin)
		return bin;

	bin = kzalloc(sizeof(*bin), GFP_ATOMIC);
	if (!bin)
		return NULL;

	bin->k = k;
	INIT_HLIST_HEAD(&bin->hhead);
	bin->root_d = RB_ROOT;
	bin->root_s = RB_ROOT;
	seqcount_init(&bin->count);

	prev = rhashtable_lookup_get_insert_key(&xfrm_policy_inexact_table,
						&bin->k, &bin->head,
						xfrm_pol_inexact_params);
	if (!prev) {
		list_add(&bin->inexact_bins, &net->xfrm.inexact_bins);
		return bin;
	}

	kfree(bin);

	return IS_ERR(prev) ? NULL : prev;
}

static bool xfrm_pol_inexact_addr_use_any_list(const xfrm_address_t *addr,
					       int family, u8 prefixlen)
{
	if (xfrm_addr_any(addr, family))
		return true;

	if (family == AF_INET6 && prefixlen < INEXACT_PREFIXLEN_IPV6)
		return true;

	if (family == AF_INET && prefixlen < INEXACT_PREFIXLEN_IPV4)
		return true;

	return false;
}

static bool
xfrm_policy_inexact_insert_use_any_list(const struct xfrm_policy *policy)
{
	const xfrm_address_t *addr;
	bool saddr_any, daddr_any;
	u8 prefixlen;

	addr = &policy->selector.saddr;
	prefixlen = policy->selector.prefixlen_s;

	saddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
						       policy->family,
						       prefixlen);
	addr = &policy->selector.daddr;
	prefixlen = policy->selector.prefixlen_d;
	daddr_any = xfrm_pol_inexact_addr_use_any_list(addr,
						       policy->family,
						       prefixlen);
	return saddr_any && daddr_any;
}

static void xfrm_pol_inexact_node_init(struct xfrm_pol_inexact_node *node,
				       const xfrm_address_t *addr, u8 prefixlen)
{
	node->addr = *addr;
	node->prefixlen = prefixlen;
}

static struct xfrm_pol_inexact_node *
xfrm_pol_inexact_node_alloc(const xfrm_address_t *addr, u8 prefixlen)
{
	struct xfrm_pol_inexact_node *node;

	node = kzalloc(sizeof(*node), GFP_ATOMIC);
	if (node)
		xfrm_pol_inexact_node_init(node, addr, prefixlen);

	return node;
}
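
/* Editor's note: xfrm_policy_addr_delta() below compares two addresses under
 * a prefix, returning <0, 0 or >0 like memcmp().  Only the first 'prefixlen'
 * bits participate; e.g. 10.1.2.3 and 10.200.0.1 compare equal under
 * prefixlen 8 because their first octet matches.
 */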

static int xfrm_policy_addr_delta(const xfrm_address_t *a,
				  const xfrm_address_t *b,
				  u8 prefixlen, u16 family)
{
	unsigned int pdw, pbi;
	int delta = 0;

	switch (family) {
	case AF_INET:
		if (sizeof(long) == 4 && prefixlen == 0)
			return ntohl(a->a4) - ntohl(b->a4);
		return (ntohl(a->a4) & ((~0UL << (32 - prefixlen)))) -
		       (ntohl(b->a4) & ((~0UL << (32 - prefixlen))));
	case AF_INET6:
		pdw = prefixlen >> 5;
		pbi = prefixlen & 0x1f;

		if (pdw) {
			delta = memcmp(a->a6, b->a6, pdw << 2);
			if (delta)
				return delta;
		}
		if (pbi) {
			u32 mask = ~0u << (32 - pbi);

			delta = (ntohl(a->a6[pdw]) & mask) -
				(ntohl(b->a6[pdw]) & mask);
		}
		break;
	default:
		break;
	}

	return delta;
}

static void xfrm_policy_inexact_list_reinsert(struct net *net,
					      struct xfrm_pol_inexact_node *n,
					      u16 family)
{
	unsigned int matched_s, matched_d;
	struct hlist_node *newpos = NULL;
	struct xfrm_policy *policy, *p;

	matched_s = 0;
	matched_d = 0;

	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		bool matches_s, matches_d;

		if (!policy->bydst_reinsert)
			continue;

		WARN_ON_ONCE(policy->family != family);

		policy->bydst_reinsert = false;
		hlist_for_each_entry(p, &n->hhead, bydst) {
			if (policy->priority >= p->priority)
				newpos = &p->bydst;
			else
				break;
		}

		if (newpos)
			hlist_add_behind(&policy->bydst, newpos);
		else
			hlist_add_head(&policy->bydst, &n->hhead);

		/* paranoia checks follow.
		 * Check that the reinserted policy matches at least
		 * saddr or daddr for current node prefix.
		 *
		 * Matching both is fine, matching saddr in one policy
		 * (but not daddr) and then matching only daddr in another
		 * is a bug.
		 */
		matches_s = xfrm_policy_addr_delta(&policy->selector.saddr,
						   &n->addr,
						   n->prefixlen,
						   family) == 0;
		matches_d = xfrm_policy_addr_delta(&policy->selector.daddr,
						   &n->addr,
						   n->prefixlen,
						   family) == 0;
		if (matches_s && matches_d)
			continue;

		WARN_ON_ONCE(!matches_s && !matches_d);
		if (matches_s)
			matched_s++;
		if (matches_d)
			matched_d++;
		WARN_ON_ONCE(matched_s && matched_d);
	}
}

static void xfrm_policy_inexact_node_reinsert(struct net *net,
					      struct xfrm_pol_inexact_node *n,
					      struct rb_root *new,
					      u16 family)
{
	struct rb_node **p, *parent = NULL;
	struct xfrm_pol_inexact_node *node;

	/* we should not have another subtree here */
	WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root));

	p = &new->rb_node;
	while (*p) {
		u8 prefixlen;
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		prefixlen = min(node->prefixlen, n->prefixlen);

		delta = xfrm_policy_addr_delta(&n->addr, &node->addr,
					       prefixlen, family);
		if (delta < 0) {
			p = &parent->rb_left;
		} else if (delta > 0) {
			p = &parent->rb_right;
		} else {
			struct xfrm_policy *tmp;

			hlist_for_each_entry(tmp, &node->hhead, bydst)
				tmp->bydst_reinsert = true;
			hlist_for_each_entry(tmp, &n->hhead, bydst)
				tmp->bydst_reinsert = true;

			INIT_HLIST_HEAD(&node->hhead);
			xfrm_policy_inexact_list_reinsert(net, node, family);

			if (node->prefixlen == n->prefixlen) {
				kfree_rcu(n, rcu);
				return;
			}

			rb_erase(*p, new);
			kfree_rcu(n, rcu);
			n = node;
			n->prefixlen = prefixlen;

			/* restart */
			p = &new->rb_node;
			parent = NULL;
		}
	}

	rb_link_node_rcu(&n->node, parent, p);
	rb_insert_color(&n->node, new);
}

/* merge nodes v and n */
static void xfrm_policy_inexact_node_merge(struct net *net,
					   struct xfrm_pol_inexact_node *v,
					   struct xfrm_pol_inexact_node *n,
					   u16 family)
{
	struct xfrm_pol_inexact_node *node;
	struct xfrm_policy *tmp;
	struct rb_node *rnode;

	/* To-be-merged node v has a subtree.
	 *
	 * Dismantle it and insert its nodes to n->root.
	 */
	while ((rnode = rb_first(&v->root)) != NULL) {
		node = rb_entry(rnode, struct xfrm_pol_inexact_node, node);
		rb_erase(&node->node, &v->root);
		xfrm_policy_inexact_node_reinsert(net, node, &n->root,
						  family);
	}

	hlist_for_each_entry(tmp, &v->hhead, bydst)
		tmp->bydst_reinsert = true;
	hlist_for_each_entry(tmp, &n->hhead, bydst)
		tmp->bydst_reinsert = true;

	INIT_HLIST_HEAD(&n->hhead);
	xfrm_policy_inexact_list_reinsert(net, n, family);
}

static struct xfrm_pol_inexact_node *
xfrm_policy_inexact_insert_node(struct net *net,
				struct rb_root *root,
				xfrm_address_t *addr,
				u16 family, u8 prefixlen, u8 dir)
{
	struct xfrm_pol_inexact_node *cached = NULL;
	struct rb_node **p, *parent = NULL;
	struct xfrm_pol_inexact_node *node;

	p = &root->rb_node;
	while (*p) {
		int delta;

		parent = *p;
		node = rb_entry(*p, struct xfrm_pol_inexact_node, node);

		delta = xfrm_policy_addr_delta(addr, &node->addr,
					       node->prefixlen,
					       family);
		if (delta == 0 && prefixlen >= node->prefixlen) {
			WARN_ON_ONCE(cached); /* ipsec policies got lost */
			return node;
		}

		if (delta < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;

		if (prefixlen < node->prefixlen) {
			delta = xfrm_policy_addr_delta(addr, &node->addr,
						       prefixlen,
						       family);
			if (delta)
				continue;

			/* This node is a subnet of the new prefix. It needs
			 * to be removed and re-inserted with the smaller
			 * prefix and all nodes that are now also covered
			 * by the reduced prefixlen.
			 */
			rb_erase(&node->node, root);

			if (!cached) {
				xfrm_pol_inexact_node_init(node, addr,
							   prefixlen);
				cached = node;
			} else {
				/* This node also falls within the new
				 * prefixlen. Merge the to-be-reinserted
				 * node and this one.
				 */
				xfrm_policy_inexact_node_merge(net, node,
							       cached, family);
				kfree_rcu(node, rcu);
			}

			/* restart */
			p = &root->rb_node;
			parent = NULL;
		}
	}

	node = cached;
	if (!node) {
		node = xfrm_pol_inexact_node_alloc(addr, prefixlen);
		if (!node)
			return NULL;
	}

	rb_link_node_rcu(&node->node, parent, p);
	rb_insert_color(&node->node, root);

	return node;
}

static void xfrm_policy_inexact_gc_tree(struct rb_root *r, bool rm)
{
	struct xfrm_pol_inexact_node *node;
	struct rb_node *rn = rb_first(r);

	while (rn) {
		node = rb_entry(rn, struct xfrm_pol_inexact_node, node);

		xfrm_policy_inexact_gc_tree(&node->root, rm);
		rn = rb_next(rn);

		if (!hlist_empty(&node->hhead) || !RB_EMPTY_ROOT(&node->root)) {
			WARN_ON_ONCE(rm);
			continue;
		}

		rb_erase(&node->node, r);
		kfree_rcu(node, rcu);
	}
}

static void __xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b, bool net_exit)
{
	write_seqcount_begin(&b->count);
	xfrm_policy_inexact_gc_tree(&b->root_d, net_exit);
	xfrm_policy_inexact_gc_tree(&b->root_s, net_exit);
	write_seqcount_end(&b->count);

	if (!RB_EMPTY_ROOT(&b->root_d) || !RB_EMPTY_ROOT(&b->root_s) ||
	    !hlist_empty(&b->hhead)) {
		WARN_ON_ONCE(net_exit);
		return;
	}

	if (rhashtable_remove_fast(&xfrm_policy_inexact_table, &b->head,
				   xfrm_pol_inexact_params) == 0) {
		list_del(&b->inexact_bins);
		kfree_rcu(b, rcu);
	}
}

static void xfrm_policy_inexact_prune_bin(struct xfrm_pol_inexact_bin *b)
{
	struct net *net = read_pnet(&b->k.net);

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	__xfrm_policy_inexact_prune_bin(b, false);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}

static void __xfrm_policy_inexact_flush(struct net *net)
{
	struct xfrm_pol_inexact_bin *bin, *t;

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	list_for_each_entry_safe(bin, t, &net->xfrm.inexact_bins, inexact_bins)
		__xfrm_policy_inexact_prune_bin(bin, false);
}
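
/* Editor's note: xfrm_policy_inexact_alloc_chain() below picks the list a
 * policy is stored on.  Roughly (illustration):
 *
 *	saddr any/short prefix, daddr any/short prefix -> bin->hhead
 *	daddr any/short prefix, saddr usable           -> root_s node hhead
 *	daddr usable, saddr any/short prefix           -> root_d node hhead
 *	both usable                                    -> saddr node inside
 *	                                                  the root_d subtree
 *
 * where "short prefix" means shorter than INEXACT_PREFIXLEN_IPV4/6.
 */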

static struct hlist_head *
xfrm_policy_inexact_alloc_chain(struct xfrm_pol_inexact_bin *bin,
				struct xfrm_policy *policy, u8 dir)
{
	struct xfrm_pol_inexact_node *n;
	struct net *net;

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	if (xfrm_policy_inexact_insert_use_any_list(policy))
		return &bin->hhead;

	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.daddr,
					       policy->family,
					       policy->selector.prefixlen_d)) {
		write_seqcount_begin(&bin->count);
		n = xfrm_policy_inexact_insert_node(net,
						    &bin->root_s,
						    &policy->selector.saddr,
						    policy->family,
						    policy->selector.prefixlen_s,
						    dir);
		write_seqcount_end(&bin->count);
		if (!n)
			return NULL;

		return &n->hhead;
	}

	/* daddr is fixed */
	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &bin->root_d,
					    &policy->selector.daddr,
					    policy->family,
					    policy->selector.prefixlen_d, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	/* saddr is wildcard */
	if (xfrm_pol_inexact_addr_use_any_list(&policy->selector.saddr,
					       policy->family,
					       policy->selector.prefixlen_s))
		return &n->hhead;

	write_seqcount_begin(&bin->count);
	n = xfrm_policy_inexact_insert_node(net,
					    &n->root,
					    &policy->selector.saddr,
					    policy->family,
					    policy->selector.prefixlen_s, dir);
	write_seqcount_end(&bin->count);
	if (!n)
		return NULL;

	return &n->hhead;
}

static struct xfrm_policy *
xfrm_policy_inexact_insert(struct xfrm_policy *policy, u8 dir, int excl)
{
	struct xfrm_pol_inexact_bin *bin;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct net *net;

	bin = xfrm_policy_inexact_alloc_bin(policy, dir);
	if (!bin)
		return ERR_PTR(-ENOMEM);

	net = xp_net(policy);
	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	chain = xfrm_policy_inexact_alloc_chain(bin, policy, dir);
	if (!chain) {
		__xfrm_policy_inexact_prune_bin(bin, false);
		return ERR_PTR(-ENOMEM);
	}

	delpol = xfrm_policy_insert_list(chain, policy, excl);
	if (delpol && excl) {
		__xfrm_policy_inexact_prune_bin(bin, false);
		return ERR_PTR(-EEXIST);
	}

	chain = &net->xfrm.policy_inexact[dir];
	xfrm_policy_insert_inexact_list(chain, policy);

	if (delpol)
		__xfrm_policy_inexact_prune_bin(bin, false);

	return delpol;
}
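
/* Editor's note: xfrm_policy_inexact_insert() above resolves (or creates)
 * the fastbin, picks the per-bin chain, inserts the policy there and also
 * links it on the per-direction policy_inexact list used as a flat fallback;
 * a replaced policy is returned as delpol so the caller can kill it.
 */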

static void xfrm_hash_rebuild(struct work_struct *work)
{
	struct net *net = container_of(work, struct net,
				       xfrm.policy_hthresh.work);
	unsigned int hmask;
	struct xfrm_policy *pol;
	struct xfrm_policy *policy;
	struct hlist_head *chain;
	struct hlist_head *odst;
	struct hlist_node *newpos;
	int i;
	int dir;
	unsigned seq;
	u8 lbits4, rbits4, lbits6, rbits6;

	mutex_lock(&hash_resize_mutex);

	/* read selector prefixlen thresholds */
	do {
		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);

		lbits4 = net->xfrm.policy_hthresh.lbits4;
		rbits4 = net->xfrm.policy_hthresh.rbits4;
		lbits6 = net->xfrm.policy_hthresh.lbits6;
		rbits6 = net->xfrm.policy_hthresh.rbits6;
	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	/* make sure that we can insert the indirect policies again before
	 * we start with destructive action.
	 */
	list_for_each_entry(policy, &net->xfrm.policy_all, walk.all) {
		struct xfrm_pol_inexact_bin *bin;
		u8 dbits, sbits;

		dir = xfrm_policy_id2dir(policy->index);
		if (policy->walk.dead || dir >= XFRM_POLICY_MAX)
			continue;

		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			if (policy->family == AF_INET) {
				dbits = rbits4;
				sbits = lbits4;
			} else {
				dbits = rbits6;
				sbits = lbits6;
			}
		} else {
			if (policy->family == AF_INET) {
				dbits = lbits4;
				sbits = rbits4;
			} else {
				dbits = lbits6;
				sbits = rbits6;
			}
		}

		if (policy->selector.prefixlen_d < dbits ||
		    policy->selector.prefixlen_s < sbits)
			continue;

		bin = xfrm_policy_inexact_alloc_bin(policy, dir);
		if (!bin)
			goto out_unlock;

		if (!xfrm_policy_inexact_alloc_chain(bin, policy, dir))
			goto out_unlock;
	}

	/* reset the bydst and inexact table in all directions */
	xfrm_hash_reset_inexact_table(net);

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
		hmask = net->xfrm.policy_bydst[dir].hmask;
		odst = net->xfrm.policy_bydst[dir].table;
		for (i = hmask; i >= 0; i--)
			INIT_HLIST_HEAD(odst + i);
		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			/* dir out => dst = remote, src = local */
			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
		} else {
			/* dir in/fwd => dst = local, src = remote */
			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
		}
	}

	/* re-insert all policies by order of creation */
	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		if (policy->walk.dead)
			continue;
		dir = xfrm_policy_id2dir(policy->index);
		if (dir >= XFRM_POLICY_MAX) {
			/* skip socket policies */
			continue;
		}
		newpos = NULL;
		chain = policy_hash_bysel(net, &policy->selector,
					  policy->family, dir);
		if (!chain) {
			void *p = xfrm_policy_inexact_insert(policy, dir, 0);

			WARN_ONCE(IS_ERR(p), "reinsert: %ld\n", PTR_ERR(p));
			continue;
		}

		hlist_for_each_entry(pol, chain, bydst) {
			if (policy->priority >= pol->priority)
				newpos = &pol->bydst;
			else
				break;
		}
		if (newpos)
			hlist_add_behind_rcu(&policy->bydst, newpos);
		else
			hlist_add_head_rcu(&policy->bydst, chain);
	}

out_unlock:
	__xfrm_policy_inexact_flush(net);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	mutex_unlock(&hash_resize_mutex);
}

void xfrm_policy_hash_rebuild(struct net *net)
{
	schedule_work(&net->xfrm.policy_hthresh.work);
}
EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
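
/* Editor's note: the hashing thresholds (lbits/rbits, per family) read by
 * the rebuild above set the minimum selector prefix lengths a policy needs
 * in order to live in the bydst hash tables; policies with shorter prefixes
 * are kept on the inexact lists/trees instead.  For OUT policies dst is the
 * remote end (rbits) and src the local end (lbits); IN/FWD are the mirror
 * image.
 */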

/* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute unpredictability of ordering of rules. This will not pass.
 */
static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
{
	static u32 idx_generator;

	for (;;) {
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		if (!index) {
			idx = (idx_generator | dir);
			idx_generator += 8;
		} else {
			idx = index;
			index = 0;
		}

		if (idx == 0)
			idx = 8;
		list = net->xfrm.policy_byidx + idx_hash(net, idx);
		found = 0;
		hlist_for_each_entry(p, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}

static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}

static void xfrm_policy_requeue(struct xfrm_policy *old,
				struct xfrm_policy *new)
{
	struct xfrm_policy_queue *pq = &old->polq;
	struct sk_buff_head list;

	if (skb_queue_empty(&pq->hold_queue))
		return;

	__skb_queue_head_init(&list);

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice_init(&pq->hold_queue, &list);
	if (del_timer(&pq->hold_timer))
		xfrm_pol_put(old);
	spin_unlock_bh(&pq->hold_queue.lock);

	pq = &new->polq;

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice(&list, &pq->hold_queue);
	pq->timeout = XFRM_QUEUE_TMO_MIN;
	if (!mod_timer(&pq->hold_timer, jiffies))
		xfrm_pol_hold(new);
	spin_unlock_bh(&pq->hold_queue.lock);
}

static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
				   struct xfrm_policy *pol)
{
	u32 mark = policy->mark.v & policy->mark.m;

	if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
		return true;

	if ((mark & pol->mark.m) == pol->mark.v &&
	    policy->priority == pol->priority)
		return true;

	return false;
}

static u32 xfrm_pol_bin_key(const void *data, u32 len, u32 seed)
{
	const struct xfrm_pol_inexact_key *k = data;
	u32 a = k->type << 24 | k->dir << 16 | k->family;

	return jhash_3words(a, k->if_id, net_hash_mix(read_pnet(&k->net)),
			    seed);
}

static u32 xfrm_pol_bin_obj(const void *data, u32 len, u32 seed)
{
	const struct xfrm_pol_inexact_bin *b = data;

	return xfrm_pol_bin_key(&b->k, 0, seed);
}

static int xfrm_pol_bin_cmp(struct rhashtable_compare_arg *arg,
			    const void *ptr)
{
	const struct xfrm_pol_inexact_key *key = arg->key;
	const struct xfrm_pol_inexact_bin *b = ptr;
	int ret;

	if (!net_eq(read_pnet(&b->k.net), read_pnet(&key->net)))
		return -1;

	ret = b->k.dir ^ key->dir;
	if (ret)
		return ret;

	ret = b->k.type ^ key->type;
	if (ret)
		return ret;

	ret = b->k.family ^ key->family;
	if (ret)
		return ret;

	return b->k.if_id ^ key->if_id;
}

static const struct rhashtable_params xfrm_pol_inexact_params = {
	.head_offset		= offsetof(struct xfrm_pol_inexact_bin, head),
	.hashfn			= xfrm_pol_bin_key,
	.obj_hashfn		= xfrm_pol_bin_obj,
	.obj_cmpfn		= xfrm_pol_bin_cmp,
	.automatic_shrinking	= true,
};
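
/* Editor's note: inexact bins are keyed by (net, dir, type, family, if_id).
 * xfrm_pol_bin_key() packs dir/type/family into one word and jhashes it with
 * if_id and the netns mix; xfrm_pol_bin_cmp() XORs the fields so it returns
 * nonzero on the first mismatch, as the rhashtable compare callback expects.
 */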

static void xfrm_policy_insert_inexact_list(struct hlist_head *chain,
					    struct xfrm_policy *policy)
{
	struct xfrm_policy *pol, *delpol = NULL;
	struct hlist_node *newpos = NULL;
	int i = 0;

	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		if (pol->type == policy->type &&
		    pol->if_id == policy->if_id &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(policy, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst_inexact_list;
			continue;
		}
		if (delpol)
			break;
	}

	if (newpos)
		hlist_add_behind_rcu(&policy->bydst_inexact_list, newpos);
	else
		hlist_add_head_rcu(&policy->bydst_inexact_list, chain);

	hlist_for_each_entry(pol, chain, bydst_inexact_list) {
		pol->pos = i;
		i++;
	}
}

static struct xfrm_policy *xfrm_policy_insert_list(struct hlist_head *chain,
						   struct xfrm_policy *policy,
						   bool excl)
{
	struct xfrm_policy *pol, *newpos = NULL, *delpol = NULL;

	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == policy->type &&
		    pol->if_id == policy->if_id &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(policy, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl)
				return ERR_PTR(-EEXIST);
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = pol;
			continue;
		}
		if (delpol)
			break;
	}

	if (newpos)
		hlist_add_behind_rcu(&policy->bydst, &newpos->bydst);
	else
		hlist_add_head_rcu(&policy->bydst, chain);

	return delpol;
}
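
/* Editor's note: both insert helpers above keep their chains sorted by
 * ascending priority, inserting behind the last entry whose priority is
 * less than or equal to the new one, so among equal priorities the newest
 * policy ends up last.  A policy with identical selector/mark/ctx is
 * returned as delpol for replacement (or the insert fails with -EEXIST
 * when an exclusive insert was requested).
 */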

int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct net *net = xp_net(policy);
	struct xfrm_policy *delpol;
	struct hlist_head *chain;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
	if (chain)
		delpol = xfrm_policy_insert_list(chain, policy, excl);
	else
		delpol = xfrm_policy_inexact_insert(policy, dir, excl);

	if (IS_ERR(delpol)) {
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
		return PTR_ERR(delpol);
	}

	__xfrm_policy_link(policy, dir);

	/* After previous checking, family can either be AF_INET or AF_INET6 */
	if (policy->family == AF_INET)
		rt_genid_bump_ipv4(net);
	else
		rt_genid_bump_ipv6(net);

	if (delpol) {
		xfrm_policy_requeue(delpol, policy);
		__xfrm_policy_unlink(delpol, dir);
	}
	policy->index = delpol ? delpol->index :
				 xfrm_gen_index(net, dir, policy->index);
	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
	policy->curlft.add_time = ktime_get_real_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

static struct xfrm_policy *
__xfrm_policy_bysel_ctx(struct hlist_head *chain, u32 mark, u32 if_id,
			u8 type, int dir,
			struct xfrm_selector *sel,
			struct xfrm_sec_ctx *ctx)
{
	struct xfrm_policy *pol;

	if (!chain)
		return NULL;

	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == type &&
		    pol->if_id == if_id &&
		    (mark & pol->mark.m) == pol->mark.v &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security))
			return pol;
	}

	return NULL;
}

struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
					  u8 type, int dir,
					  struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete,
					  int *err)
{
	struct xfrm_pol_inexact_bin *bin = NULL;
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;

	*err = 0;
	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, sel, sel->family, dir);
	if (!chain) {
		struct xfrm_pol_inexact_candidates cand;
		int i;

		bin = xfrm_policy_inexact_lookup(net, type,
						 sel->family, dir, if_id);
		if (!bin) {
			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
			return NULL;
		}

		if (!xfrm_policy_find_inexact_candidates(&cand, bin,
							 &sel->saddr,
							 &sel->daddr)) {
			spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
			return NULL;
		}

		pol = NULL;
		for (i = 0; i < ARRAY_SIZE(cand.res); i++) {
			struct xfrm_policy *tmp;

			tmp = __xfrm_policy_bysel_ctx(cand.res[i], mark,
						      if_id, type, dir,
						      sel, ctx);
			if (!tmp)
				continue;

			if (!pol || tmp->pos < pol->pos)
				pol = tmp;
		}
	} else {
		pol = __xfrm_policy_bysel_ctx(chain, mark, if_id, type, dir,
					      sel, ctx);
	}

	if (pol) {
		xfrm_pol_hold(pol);
		if (delete) {
			*err = security_xfrm_policy_delete(pol->security);
			if (*err) {
				spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
				return pol;
			}
			__xfrm_policy_unlink(pol, dir);
		}
		ret = pol;
	}
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	if (bin && delete)
		xfrm_policy_inexact_prune_bin(bin);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id,
				     u8 type, int dir, u32 id, int delete,
				     int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;

	*err = -ENOENT;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = net->xfrm.policy_byidx + idx_hash(net, id);
	ret = NULL;
	hlist_for_each_entry(pol, chain, byidx) {
		if (pol->type == type && pol->index == id &&
		    pol->if_id == if_id &&
		    (mark & pol->mark.m) == pol->mark.v) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
						pol->security);
				if (*err) {
					spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
	struct xfrm_policy *pol;
	int err = 0;

	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
		if (pol->walk.dead ||
		    xfrm_policy_id2dir(pol->index) >= XFRM_POLICY_MAX ||
		    pol->type != type)
			continue;

		err = security_xfrm_policy_delete(pol->security);
		if (err) {
			xfrm_audit_policy_delete(pol, 0, task_valid);
			return err;
		}
	}
	return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
	return 0;
}
#endif

int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
{
	int dir, err = 0, cnt = 0;
	struct xfrm_policy *pol;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
	if (err)
		goto out;

again:
	list_for_each_entry(pol, &net->xfrm.policy_all, walk.all) {
		dir = xfrm_policy_id2dir(pol->index);
		if (pol->walk.dead ||
		    dir >= XFRM_POLICY_MAX ||
		    pol->type != type)
			continue;

		__xfrm_policy_unlink(pol, dir);
		spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
		cnt++;
		xfrm_audit_policy_delete(pol, 1, task_valid);
		xfrm_policy_kill(pol);
		spin_lock_bh(&net->xfrm.xfrm_policy_lock);
		goto again;
	}
	if (cnt)
		__xfrm_policy_inexact_flush(net);
	else
		err = -ESRCH;
out:
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);
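
/* Editor's note: xfrm_policy_flush() above drops xfrm_policy_lock around the
 * audit/kill path; since the policy_all list may change while the lock is
 * released, the walk restarts from the head (goto again) after every
 * deletion instead of continuing from a possibly stale position.
 */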

int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
		     int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol;
	struct xfrm_policy_walk_entry *x;
	int error = 0;

	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
	    walk->type != XFRM_POLICY_TYPE_ANY)
		return -EINVAL;

	if (list_empty(&walk->walk.all) && walk->seq != 0)
		return 0;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	if (list_empty(&walk->walk.all))
		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
	else
		x = list_first_entry(&walk->walk.all,
				     struct xfrm_policy_walk_entry, all);

	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
		if (x->dead)
			continue;
		pol = container_of(x, struct xfrm_policy, walk);
		if (walk->type != XFRM_POLICY_TYPE_ANY &&
		    walk->type != pol->type)
			continue;
		error = func(pol, xfrm_policy_id2dir(pol->index),
			     walk->seq, data);
		if (error) {
			list_move_tail(&walk->walk.all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		error = -ENOENT;
		goto out;
	}
	list_del_init(&walk->walk.all);
out:
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
{
	INIT_LIST_HEAD(&walk->walk.all);
	walk->walk.dead = 1;
	walk->type = type;
	walk->seq = 0;
}
EXPORT_SYMBOL(xfrm_policy_walk_init);

void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
{
	if (list_empty(&walk->walk.all))
		return;

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	list_del(&walk->walk.all);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);
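
/* Editor's sketch of the walker API above (hypothetical caller, not part of
 * this file).  The callback returns nonzero to pause the walk; the walk
 * entry left on policy_all lets a later xfrm_policy_walk() call resume
 * where the previous one stopped:
 *
 *	static int dump_one(struct xfrm_policy *pol, int dir, int seq,
 *			    void *data)
 *	{
 *		return 0;
 *	}
 *
 *	struct xfrm_policy_walk walk;
 *
 *	xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_ANY);
 *	xfrm_policy_walk(net, &walk, dump_one, NULL);
 *	xfrm_policy_walk_done(&walk, net);
 */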

/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
static int xfrm_policy_match(const struct xfrm_policy *pol,
			     const struct flowi *fl,
			     u8 type, u16 family, int dir, u32 if_id)
{
	const struct xfrm_selector *sel = &pol->selector;
	int ret = -ESRCH;
	bool match;

	if (pol->family != family ||
	    pol->if_id != if_id ||
	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
						  dir);
	return ret;
}

static struct xfrm_pol_inexact_node *
xfrm_policy_lookup_inexact_addr(const struct rb_root *r,
				seqcount_t *count,
				const xfrm_address_t *addr, u16 family)
{
	const struct rb_node *parent;
	int seq;

again:
	seq = read_seqcount_begin(count);

	parent = rcu_dereference_raw(r->rb_node);
	while (parent) {
		struct xfrm_pol_inexact_node *node;
		int delta;

		node = rb_entry(parent, struct xfrm_pol_inexact_node, node);

		delta = xfrm_policy_addr_delta(addr, &node->addr,
					       node->prefixlen, family);
		if (delta < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			continue;
		} else if (delta > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
			continue;
		}

		return node;
	}

	if (read_seqcount_retry(count, seq))
		goto again;

	return NULL;
}

static bool
xfrm_policy_find_inexact_candidates(struct xfrm_pol_inexact_candidates *cand,
				    struct xfrm_pol_inexact_bin *b,
				    const xfrm_address_t *saddr,
				    const xfrm_address_t *daddr)
{
	struct xfrm_pol_inexact_node *n;
	u16 family;

	if (!b)
		return false;

	family = b->k.family;
	memset(cand, 0, sizeof(*cand));
	cand->res[XFRM_POL_CAND_ANY] = &b->hhead;

	n = xfrm_policy_lookup_inexact_addr(&b->root_d, &b->count, daddr,
					    family);
	if (n) {
		cand->res[XFRM_POL_CAND_DADDR] = &n->hhead;
		n = xfrm_policy_lookup_inexact_addr(&n->root, &b->count, saddr,
						    family);
		if (n)
			cand->res[XFRM_POL_CAND_BOTH] = &n->hhead;
	}

	n = xfrm_policy_lookup_inexact_addr(&b->root_s, &b->count, saddr,
					    family);
	if (n)
		cand->res[XFRM_POL_CAND_SADDR] = &n->hhead;

	return true;
}

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup_rcu(struct net *net, u8 type, u16 family,
			       u8 dir, u32 if_id)
{
	struct xfrm_pol_inexact_key k = {
		.family = family,
		.type = type,
		.dir = dir,
		.if_id = if_id,
	};

	write_pnet(&k.net, net);

	return rhashtable_lookup(&xfrm_policy_inexact_table, &k,
				 xfrm_pol_inexact_params);
}

static struct xfrm_pol_inexact_bin *
xfrm_policy_inexact_lookup(struct net *net, u8 type, u16 family,
			   u8 dir, u32 if_id)
{
	struct xfrm_pol_inexact_bin *bin;

	lockdep_assert_held(&net->xfrm.xfrm_policy_lock);

	rcu_read_lock();
	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
	rcu_read_unlock();

	return bin;
}

static struct xfrm_policy *
__xfrm_policy_eval_candidates(struct hlist_head *chain,
			      struct xfrm_policy *prefer,
			      const struct flowi *fl,
			      u8 type, u16 family, int dir, u32 if_id)
{
	u32 priority = prefer ? prefer->priority : ~0u;
	struct xfrm_policy *pol;

	if (!chain)
		return NULL;

	hlist_for_each_entry_rcu(pol, chain, bydst) {
		int err;

		if (pol->priority > priority)
			break;

		err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
		if (err) {
			if (err != -ESRCH)
				return ERR_PTR(err);

			continue;
		}

		if (prefer) {
			/* matches.  Is it older than *prefer? */
			if (pol->priority == priority &&
			    prefer->pos < pol->pos)
				return prefer;
		}

		return pol;
	}

	return NULL;
}

static struct xfrm_policy *
xfrm_policy_eval_candidates(struct xfrm_pol_inexact_candidates *cand,
			    struct xfrm_policy *prefer,
			    const struct flowi *fl,
			    u8 type, u16 family, int dir, u32 if_id)
{
	struct xfrm_policy *tmp;
	int i;

	for (i = 0; i < ARRAY_SIZE(cand->res); i++) {
		tmp = __xfrm_policy_eval_candidates(cand->res[i],
						    prefer,
						    fl, type, family, dir,
						    if_id);
		if (!tmp)
			continue;

		if (IS_ERR(tmp))
			return tmp;
		prefer = tmp;
	}

	return prefer;
}

static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
						     const struct flowi *fl,
						     u16 family, u8 dir,
						     u32 if_id)
{
	struct xfrm_pol_inexact_candidates cand;
	const xfrm_address_t *daddr, *saddr;
	struct xfrm_pol_inexact_bin *bin;
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	unsigned int sequence;
	int err;

	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))
		return NULL;

	rcu_read_lock();
retry:
	do {
		sequence = read_seqcount_begin(&xfrm_policy_hash_generation);
		chain = policy_hash_direct(net, daddr, saddr, family, dir);
	} while (read_seqcount_retry(&xfrm_policy_hash_generation, sequence));

	ret = NULL;
	hlist_for_each_entry_rcu(pol, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir, if_id);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			break;
		}
	}
	bin = xfrm_policy_inexact_lookup_rcu(net, type, family, dir, if_id);
	if (!bin || !xfrm_policy_find_inexact_candidates(&cand, bin, saddr,
							 daddr))
		goto skip_inexact;

	pol = xfrm_policy_eval_candidates(&cand, ret, fl, type,
					  family, dir, if_id);
	if (pol) {
		ret = pol;
		if (IS_ERR(pol))
			goto fail;
	}

skip_inexact:
	if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence))
		goto retry;

	if (ret && !xfrm_pol_hold_rcu(ret))
		goto retry;
fail:
	rcu_read_unlock();

	return ret;
}
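
/* Editor's note: xfrm_policy_lookup_bytype() above runs lockless under RCU.
 * xfrm_policy_hash_generation guards against a concurrent table resize: if
 * the sequence count moved, or the chosen policy's refcount already dropped
 * to zero (xfrm_pol_hold_rcu() failed), the whole lookup is retried.
 */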
xfrm_policy *xfrm_policy_lookup(struct net *net, 2134 const struct flowi *fl, 2135 u16 family, u8 dir, u32 if_id) 2136 { 2137 #ifdef CONFIG_XFRM_SUB_POLICY 2138 struct xfrm_policy *pol; 2139 2140 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, 2141 dir, if_id); 2142 if (pol != NULL) 2143 return pol; 2144 #endif 2145 return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, 2146 dir, if_id); 2147 } 2148 2149 static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir, 2150 const struct flowi *fl, 2151 u16 family, u32 if_id) 2152 { 2153 struct xfrm_policy *pol; 2154 2155 rcu_read_lock(); 2156 again: 2157 pol = rcu_dereference(sk->sk_policy[dir]); 2158 if (pol != NULL) { 2159 bool match; 2160 int err = 0; 2161 2162 if (pol->family != family) { 2163 pol = NULL; 2164 goto out; 2165 } 2166 2167 match = xfrm_selector_match(&pol->selector, fl, family); 2168 if (match) { 2169 if ((sk->sk_mark & pol->mark.m) != pol->mark.v || 2170 pol->if_id != if_id) { 2171 pol = NULL; 2172 goto out; 2173 } 2174 err = security_xfrm_policy_lookup(pol->security, 2175 fl->flowi_secid, 2176 dir); 2177 if (!err) { 2178 if (!xfrm_pol_hold_rcu(pol)) 2179 goto again; 2180 } else if (err == -ESRCH) { 2181 pol = NULL; 2182 } else { 2183 pol = ERR_PTR(err); 2184 } 2185 } else 2186 pol = NULL; 2187 } 2188 out: 2189 rcu_read_unlock(); 2190 return pol; 2191 } 2192 2193 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir) 2194 { 2195 struct net *net = xp_net(pol); 2196 2197 list_add(&pol->walk.all, &net->xfrm.policy_all); 2198 net->xfrm.policy_count[dir]++; 2199 xfrm_pol_hold(pol); 2200 } 2201 2202 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, 2203 int dir) 2204 { 2205 struct net *net = xp_net(pol); 2206 2207 if (list_empty(&pol->walk.all)) 2208 return NULL; 2209 2210 /* Socket policies are not hashed. 
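	 * Their bydst/byidx nodes are never added to the lookup hashes,
	 * which is what the hlist_unhashed() check below relies on.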
	 */
	if (!hlist_unhashed(&pol->bydst)) {
		hlist_del_rcu(&pol->bydst);
		hlist_del_init(&pol->bydst_inexact_list);
		hlist_del(&pol->byidx);
	}

	list_del_init(&pol->walk.all);
	net->xfrm.policy_count[dir]--;

	return pol;
}

static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
{
	__xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
}

static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
{
	__xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
}

int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	struct net *net = xp_net(pol);

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	spin_unlock_bh(&net->xfrm.xfrm_policy_lock);
	if (pol) {
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);

int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct net *net = sock_net(sk);
	struct xfrm_policy *old_pol;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
		return -EINVAL;
#endif

	spin_lock_bh(&net->xfrm.xfrm_policy_lock);
	old_pol = rcu_dereference_protected(sk->sk_policy[dir],
				lockdep_is_held(&net->xfrm.xfrm_policy_lock));
	if (pol) {
		pol->curlft.add_time = ktime_get_real_seconds();
		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
		xfrm_sk_policy_link(pol, dir);
	}
	rcu_assign_pointer(sk->sk_policy[dir], pol);
	if (old_pol) {
		if (pol)
			xfrm_policy_requeue(old_pol, pol);

		/* Unlinking always succeeds. This is the only function
		 * allowed to delete or replace a socket policy.
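		 * The old policy is killed only after the policy lock has
		 * been dropped, below.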
2273 */ 2274 xfrm_sk_policy_unlink(old_pol, dir); 2275 } 2276 spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 2277 2278 if (old_pol) { 2279 xfrm_policy_kill(old_pol); 2280 } 2281 return 0; 2282 } 2283 2284 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir) 2285 { 2286 struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC); 2287 struct net *net = xp_net(old); 2288 2289 if (newp) { 2290 newp->selector = old->selector; 2291 if (security_xfrm_policy_clone(old->security, 2292 &newp->security)) { 2293 kfree(newp); 2294 return NULL; /* ENOMEM */ 2295 } 2296 newp->lft = old->lft; 2297 newp->curlft = old->curlft; 2298 newp->mark = old->mark; 2299 newp->if_id = old->if_id; 2300 newp->action = old->action; 2301 newp->flags = old->flags; 2302 newp->xfrm_nr = old->xfrm_nr; 2303 newp->index = old->index; 2304 newp->type = old->type; 2305 newp->family = old->family; 2306 memcpy(newp->xfrm_vec, old->xfrm_vec, 2307 newp->xfrm_nr*sizeof(struct xfrm_tmpl)); 2308 spin_lock_bh(&net->xfrm.xfrm_policy_lock); 2309 xfrm_sk_policy_link(newp, dir); 2310 spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 2311 xfrm_pol_put(newp); 2312 } 2313 return newp; 2314 } 2315 2316 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) 2317 { 2318 const struct xfrm_policy *p; 2319 struct xfrm_policy *np; 2320 int i, ret = 0; 2321 2322 rcu_read_lock(); 2323 for (i = 0; i < 2; i++) { 2324 p = rcu_dereference(osk->sk_policy[i]); 2325 if (p) { 2326 np = clone_policy(p, i); 2327 if (unlikely(!np)) { 2328 ret = -ENOMEM; 2329 break; 2330 } 2331 rcu_assign_pointer(sk->sk_policy[i], np); 2332 } 2333 } 2334 rcu_read_unlock(); 2335 return ret; 2336 } 2337 2338 static int 2339 xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local, 2340 xfrm_address_t *remote, unsigned short family, u32 mark) 2341 { 2342 int err; 2343 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 2344 2345 if (unlikely(afinfo == NULL)) 2346 return -EINVAL; 2347 err = afinfo->get_saddr(net, oif, local, remote, mark); 2348 rcu_read_unlock(); 2349 return err; 2350 } 2351 2352 /* Resolve list of templates for the flow, given policy. */ 2353 2354 static int 2355 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl, 2356 struct xfrm_state **xfrm, unsigned short family) 2357 { 2358 struct net *net = xp_net(policy); 2359 int nx; 2360 int i, error; 2361 xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family); 2362 xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family); 2363 xfrm_address_t tmp; 2364 2365 for (nx = 0, i = 0; i < policy->xfrm_nr; i++) { 2366 struct xfrm_state *x; 2367 xfrm_address_t *remote = daddr; 2368 xfrm_address_t *local = saddr; 2369 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i]; 2370 2371 if (tmpl->mode == XFRM_MODE_TUNNEL || 2372 tmpl->mode == XFRM_MODE_BEET) { 2373 remote = &tmpl->id.daddr; 2374 local = &tmpl->saddr; 2375 if (xfrm_addr_any(local, tmpl->encap_family)) { 2376 error = xfrm_get_saddr(net, fl->flowi_oif, 2377 &tmp, remote, 2378 tmpl->encap_family, 0); 2379 if (error) 2380 goto fail; 2381 local = &tmp; 2382 } 2383 } 2384 2385 x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, 2386 family, policy->if_id); 2387 2388 if (x && x->km.state == XFRM_STATE_VALID) { 2389 xfrm[nx++] = x; 2390 daddr = remote; 2391 saddr = local; 2392 continue; 2393 } 2394 if (x) { 2395 error = (x->km.state == XFRM_STATE_ERROR ? 
2396 -EINVAL : -EAGAIN); 2397 xfrm_state_put(x); 2398 } else if (error == -ESRCH) { 2399 error = -EAGAIN; 2400 } 2401 2402 if (!tmpl->optional) 2403 goto fail; 2404 } 2405 return nx; 2406 2407 fail: 2408 for (nx--; nx >= 0; nx--) 2409 xfrm_state_put(xfrm[nx]); 2410 return error; 2411 } 2412 2413 static int 2414 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl, 2415 struct xfrm_state **xfrm, unsigned short family) 2416 { 2417 struct xfrm_state *tp[XFRM_MAX_DEPTH]; 2418 struct xfrm_state **tpp = (npols > 1) ? tp : xfrm; 2419 int cnx = 0; 2420 int error; 2421 int ret; 2422 int i; 2423 2424 for (i = 0; i < npols; i++) { 2425 if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) { 2426 error = -ENOBUFS; 2427 goto fail; 2428 } 2429 2430 ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family); 2431 if (ret < 0) { 2432 error = ret; 2433 goto fail; 2434 } else 2435 cnx += ret; 2436 } 2437 2438 /* found states are sorted for outbound processing */ 2439 if (npols > 1) 2440 xfrm_state_sort(xfrm, tpp, cnx, family); 2441 2442 return cnx; 2443 2444 fail: 2445 for (cnx--; cnx >= 0; cnx--) 2446 xfrm_state_put(tpp[cnx]); 2447 return error; 2448 2449 } 2450 2451 static int xfrm_get_tos(const struct flowi *fl, int family) 2452 { 2453 const struct xfrm_policy_afinfo *afinfo; 2454 int tos; 2455 2456 afinfo = xfrm_policy_get_afinfo(family); 2457 if (!afinfo) 2458 return 0; 2459 2460 tos = afinfo->get_tos(fl); 2461 2462 rcu_read_unlock(); 2463 2464 return tos; 2465 } 2466 2467 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) 2468 { 2469 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 2470 struct dst_ops *dst_ops; 2471 struct xfrm_dst *xdst; 2472 2473 if (!afinfo) 2474 return ERR_PTR(-EINVAL); 2475 2476 switch (family) { 2477 case AF_INET: 2478 dst_ops = &net->xfrm.xfrm4_dst_ops; 2479 break; 2480 #if IS_ENABLED(CONFIG_IPV6) 2481 case AF_INET6: 2482 dst_ops = &net->xfrm.xfrm6_dst_ops; 2483 break; 2484 #endif 2485 default: 2486 BUG(); 2487 } 2488 xdst = dst_alloc(dst_ops, NULL, 1, DST_OBSOLETE_NONE, 0); 2489 2490 if (likely(xdst)) { 2491 struct dst_entry *dst = &xdst->u.dst; 2492 2493 memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst)); 2494 } else 2495 xdst = ERR_PTR(-ENOBUFS); 2496 2497 rcu_read_unlock(); 2498 2499 return xdst; 2500 } 2501 2502 static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst, 2503 int nfheader_len) 2504 { 2505 const struct xfrm_policy_afinfo *afinfo = 2506 xfrm_policy_get_afinfo(dst->ops->family); 2507 int err; 2508 2509 if (!afinfo) 2510 return -EINVAL; 2511 2512 err = afinfo->init_path(path, dst, nfheader_len); 2513 2514 rcu_read_unlock(); 2515 2516 return err; 2517 } 2518 2519 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, 2520 const struct flowi *fl) 2521 { 2522 const struct xfrm_policy_afinfo *afinfo = 2523 xfrm_policy_get_afinfo(xdst->u.dst.ops->family); 2524 int err; 2525 2526 if (!afinfo) 2527 return -EINVAL; 2528 2529 err = afinfo->fill_dst(xdst, dev, fl); 2530 2531 rcu_read_unlock(); 2532 2533 return err; 2534 } 2535 2536 2537 /* Allocate chain of dst_entry's, attach known xfrm's, calculate 2538 * all the metrics... Shortly, bundle a bundle. 
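 *
 * A rough sketch of the result (illustrative, not normative): for nx
 * states the chain built below looks like
 *
 *	xfrm_dst[0] -> xfrm_dst[1] -> ... -> xfrm_dst[nx-1] -> dst (path)
 *
 * where each xfrm_dst carries one xfrm_state, each ->route points at the
 * route used to reach that state, and xdst0->path points at the final
 * raw route.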
2539 */ 2540 2541 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, 2542 struct xfrm_state **xfrm, 2543 struct xfrm_dst **bundle, 2544 int nx, 2545 const struct flowi *fl, 2546 struct dst_entry *dst) 2547 { 2548 struct net *net = xp_net(policy); 2549 unsigned long now = jiffies; 2550 struct net_device *dev; 2551 struct xfrm_mode *inner_mode; 2552 struct xfrm_dst *xdst_prev = NULL; 2553 struct xfrm_dst *xdst0 = NULL; 2554 int i = 0; 2555 int err; 2556 int header_len = 0; 2557 int nfheader_len = 0; 2558 int trailer_len = 0; 2559 int tos; 2560 int family = policy->selector.family; 2561 xfrm_address_t saddr, daddr; 2562 2563 xfrm_flowi_addr_get(fl, &saddr, &daddr, family); 2564 2565 tos = xfrm_get_tos(fl, family); 2566 2567 dst_hold(dst); 2568 2569 for (; i < nx; i++) { 2570 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family); 2571 struct dst_entry *dst1 = &xdst->u.dst; 2572 2573 err = PTR_ERR(xdst); 2574 if (IS_ERR(xdst)) { 2575 dst_release(dst); 2576 goto put_states; 2577 } 2578 2579 bundle[i] = xdst; 2580 if (!xdst_prev) 2581 xdst0 = xdst; 2582 else 2583 /* Ref count is taken during xfrm_alloc_dst() 2584 * No need to do dst_clone() on dst1 2585 */ 2586 xfrm_dst_set_child(xdst_prev, &xdst->u.dst); 2587 2588 if (xfrm[i]->sel.family == AF_UNSPEC) { 2589 inner_mode = xfrm_ip2inner_mode(xfrm[i], 2590 xfrm_af2proto(family)); 2591 if (!inner_mode) { 2592 err = -EAFNOSUPPORT; 2593 dst_release(dst); 2594 goto put_states; 2595 } 2596 } else 2597 inner_mode = xfrm[i]->inner_mode; 2598 2599 xdst->route = dst; 2600 dst_copy_metrics(dst1, dst); 2601 2602 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) { 2603 __u32 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]); 2604 2605 family = xfrm[i]->props.family; 2606 dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif, 2607 &saddr, &daddr, family, mark); 2608 err = PTR_ERR(dst); 2609 if (IS_ERR(dst)) 2610 goto put_states; 2611 } else 2612 dst_hold(dst); 2613 2614 dst1->xfrm = xfrm[i]; 2615 xdst->xfrm_genid = xfrm[i]->genid; 2616 2617 dst1->obsolete = DST_OBSOLETE_FORCE_CHK; 2618 dst1->flags |= DST_HOST; 2619 dst1->lastuse = now; 2620 2621 dst1->input = dst_discard; 2622 dst1->output = inner_mode->afinfo->output; 2623 2624 xdst_prev = xdst; 2625 2626 header_len += xfrm[i]->props.header_len; 2627 if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT) 2628 nfheader_len += xfrm[i]->props.header_len; 2629 trailer_len += xfrm[i]->props.trailer_len; 2630 } 2631 2632 xfrm_dst_set_child(xdst_prev, dst); 2633 xdst0->path = dst; 2634 2635 err = -ENODEV; 2636 dev = dst->dev; 2637 if (!dev) 2638 goto free_dst; 2639 2640 xfrm_init_path(xdst0, dst, nfheader_len); 2641 xfrm_init_pmtu(bundle, nx); 2642 2643 for (xdst_prev = xdst0; xdst_prev != (struct xfrm_dst *)dst; 2644 xdst_prev = (struct xfrm_dst *) xfrm_dst_child(&xdst_prev->u.dst)) { 2645 err = xfrm_fill_dst(xdst_prev, dev, fl); 2646 if (err) 2647 goto free_dst; 2648 2649 xdst_prev->u.dst.header_len = header_len; 2650 xdst_prev->u.dst.trailer_len = trailer_len; 2651 header_len -= xdst_prev->u.dst.xfrm->props.header_len; 2652 trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len; 2653 } 2654 2655 return &xdst0->u.dst; 2656 2657 put_states: 2658 for (; i < nx; i++) 2659 xfrm_state_put(xfrm[i]); 2660 free_dst: 2661 if (xdst0) 2662 dst_release_immediate(&xdst0->u.dst); 2663 2664 return ERR_PTR(err); 2665 } 2666 2667 static int xfrm_expand_policies(const struct flowi *fl, u16 family, 2668 struct xfrm_policy **pols, 2669 int *num_pols, int *num_xfrms) 2670 { 2671 int i; 2672 2673 if (*num_pols == 0 || !pols[0]) { 
2674 *num_pols = 0; 2675 *num_xfrms = 0; 2676 return 0; 2677 } 2678 if (IS_ERR(pols[0])) 2679 return PTR_ERR(pols[0]); 2680 2681 *num_xfrms = pols[0]->xfrm_nr; 2682 2683 #ifdef CONFIG_XFRM_SUB_POLICY 2684 if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW && 2685 pols[0]->type != XFRM_POLICY_TYPE_MAIN) { 2686 pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]), 2687 XFRM_POLICY_TYPE_MAIN, 2688 fl, family, 2689 XFRM_POLICY_OUT, 2690 pols[0]->if_id); 2691 if (pols[1]) { 2692 if (IS_ERR(pols[1])) { 2693 xfrm_pols_put(pols, *num_pols); 2694 return PTR_ERR(pols[1]); 2695 } 2696 (*num_pols)++; 2697 (*num_xfrms) += pols[1]->xfrm_nr; 2698 } 2699 } 2700 #endif 2701 for (i = 0; i < *num_pols; i++) { 2702 if (pols[i]->action != XFRM_POLICY_ALLOW) { 2703 *num_xfrms = -1; 2704 break; 2705 } 2706 } 2707 2708 return 0; 2709 2710 } 2711 2712 static struct xfrm_dst * 2713 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, 2714 const struct flowi *fl, u16 family, 2715 struct dst_entry *dst_orig) 2716 { 2717 struct net *net = xp_net(pols[0]); 2718 struct xfrm_state *xfrm[XFRM_MAX_DEPTH]; 2719 struct xfrm_dst *bundle[XFRM_MAX_DEPTH]; 2720 struct xfrm_dst *xdst; 2721 struct dst_entry *dst; 2722 int err; 2723 2724 /* Try to instantiate a bundle */ 2725 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family); 2726 if (err <= 0) { 2727 if (err == 0) 2728 return NULL; 2729 2730 if (err != -EAGAIN) 2731 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); 2732 return ERR_PTR(err); 2733 } 2734 2735 dst = xfrm_bundle_create(pols[0], xfrm, bundle, err, fl, dst_orig); 2736 if (IS_ERR(dst)) { 2737 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR); 2738 return ERR_CAST(dst); 2739 } 2740 2741 xdst = (struct xfrm_dst *)dst; 2742 xdst->num_xfrms = err; 2743 xdst->num_pols = num_pols; 2744 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols); 2745 xdst->policy_genid = atomic_read(&pols[0]->genid); 2746 2747 return xdst; 2748 } 2749 2750 static void xfrm_policy_queue_process(struct timer_list *t) 2751 { 2752 struct sk_buff *skb; 2753 struct sock *sk; 2754 struct dst_entry *dst; 2755 struct xfrm_policy *pol = from_timer(pol, t, polq.hold_timer); 2756 struct net *net = xp_net(pol); 2757 struct xfrm_policy_queue *pq = &pol->polq; 2758 struct flowi fl; 2759 struct sk_buff_head list; 2760 2761 spin_lock(&pq->hold_queue.lock); 2762 skb = skb_peek(&pq->hold_queue); 2763 if (!skb) { 2764 spin_unlock(&pq->hold_queue.lock); 2765 goto out; 2766 } 2767 dst = skb_dst(skb); 2768 sk = skb->sk; 2769 xfrm_decode_session(skb, &fl, dst->ops->family); 2770 spin_unlock(&pq->hold_queue.lock); 2771 2772 dst_hold(xfrm_dst_path(dst)); 2773 dst = xfrm_lookup(net, xfrm_dst_path(dst), &fl, sk, XFRM_LOOKUP_QUEUE); 2774 if (IS_ERR(dst)) 2775 goto purge_queue; 2776 2777 if (dst->flags & DST_XFRM_QUEUE) { 2778 dst_release(dst); 2779 2780 if (pq->timeout >= XFRM_QUEUE_TMO_MAX) 2781 goto purge_queue; 2782 2783 pq->timeout = pq->timeout << 1; 2784 if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout)) 2785 xfrm_pol_hold(pol); 2786 goto out; 2787 } 2788 2789 dst_release(dst); 2790 2791 __skb_queue_head_init(&list); 2792 2793 spin_lock(&pq->hold_queue.lock); 2794 pq->timeout = 0; 2795 skb_queue_splice_init(&pq->hold_queue, &list); 2796 spin_unlock(&pq->hold_queue.lock); 2797 2798 while (!skb_queue_empty(&list)) { 2799 skb = __skb_dequeue(&list); 2800 2801 xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family); 2802 dst_hold(xfrm_dst_path(skb_dst(skb))); 2803 dst = xfrm_lookup(net, xfrm_dst_path(skb_dst(skb)), &fl, skb->sk, 
0); 2804 if (IS_ERR(dst)) { 2805 kfree_skb(skb); 2806 continue; 2807 } 2808 2809 nf_reset(skb); 2810 skb_dst_drop(skb); 2811 skb_dst_set(skb, dst); 2812 2813 dst_output(net, skb->sk, skb); 2814 } 2815 2816 out: 2817 xfrm_pol_put(pol); 2818 return; 2819 2820 purge_queue: 2821 pq->timeout = 0; 2822 skb_queue_purge(&pq->hold_queue); 2823 xfrm_pol_put(pol); 2824 } 2825 2826 static int xdst_queue_output(struct net *net, struct sock *sk, struct sk_buff *skb) 2827 { 2828 unsigned long sched_next; 2829 struct dst_entry *dst = skb_dst(skb); 2830 struct xfrm_dst *xdst = (struct xfrm_dst *) dst; 2831 struct xfrm_policy *pol = xdst->pols[0]; 2832 struct xfrm_policy_queue *pq = &pol->polq; 2833 2834 if (unlikely(skb_fclone_busy(sk, skb))) { 2835 kfree_skb(skb); 2836 return 0; 2837 } 2838 2839 if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) { 2840 kfree_skb(skb); 2841 return -EAGAIN; 2842 } 2843 2844 skb_dst_force(skb); 2845 2846 spin_lock_bh(&pq->hold_queue.lock); 2847 2848 if (!pq->timeout) 2849 pq->timeout = XFRM_QUEUE_TMO_MIN; 2850 2851 sched_next = jiffies + pq->timeout; 2852 2853 if (del_timer(&pq->hold_timer)) { 2854 if (time_before(pq->hold_timer.expires, sched_next)) 2855 sched_next = pq->hold_timer.expires; 2856 xfrm_pol_put(pol); 2857 } 2858 2859 __skb_queue_tail(&pq->hold_queue, skb); 2860 if (!mod_timer(&pq->hold_timer, sched_next)) 2861 xfrm_pol_hold(pol); 2862 2863 spin_unlock_bh(&pq->hold_queue.lock); 2864 2865 return 0; 2866 } 2867 2868 static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net, 2869 struct xfrm_flo *xflo, 2870 const struct flowi *fl, 2871 int num_xfrms, 2872 u16 family) 2873 { 2874 int err; 2875 struct net_device *dev; 2876 struct dst_entry *dst; 2877 struct dst_entry *dst1; 2878 struct xfrm_dst *xdst; 2879 2880 xdst = xfrm_alloc_dst(net, family); 2881 if (IS_ERR(xdst)) 2882 return xdst; 2883 2884 if (!(xflo->flags & XFRM_LOOKUP_QUEUE) || 2885 net->xfrm.sysctl_larval_drop || 2886 num_xfrms <= 0) 2887 return xdst; 2888 2889 dst = xflo->dst_orig; 2890 dst1 = &xdst->u.dst; 2891 dst_hold(dst); 2892 xdst->route = dst; 2893 2894 dst_copy_metrics(dst1, dst); 2895 2896 dst1->obsolete = DST_OBSOLETE_FORCE_CHK; 2897 dst1->flags |= DST_HOST | DST_XFRM_QUEUE; 2898 dst1->lastuse = jiffies; 2899 2900 dst1->input = dst_discard; 2901 dst1->output = xdst_queue_output; 2902 2903 dst_hold(dst); 2904 xfrm_dst_set_child(xdst, dst); 2905 xdst->path = dst; 2906 2907 xfrm_init_path((struct xfrm_dst *)dst1, dst, 0); 2908 2909 err = -ENODEV; 2910 dev = dst->dev; 2911 if (!dev) 2912 goto free_dst; 2913 2914 err = xfrm_fill_dst(xdst, dev, fl); 2915 if (err) 2916 goto free_dst; 2917 2918 out: 2919 return xdst; 2920 2921 free_dst: 2922 dst_release(dst1); 2923 xdst = ERR_PTR(err); 2924 goto out; 2925 } 2926 2927 static struct xfrm_dst *xfrm_bundle_lookup(struct net *net, 2928 const struct flowi *fl, 2929 u16 family, u8 dir, 2930 struct xfrm_flo *xflo, u32 if_id) 2931 { 2932 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 2933 int num_pols = 0, num_xfrms = 0, err; 2934 struct xfrm_dst *xdst; 2935 2936 /* Resolve policies to use if we couldn't get them from 2937 * previous cache entry */ 2938 num_pols = 1; 2939 pols[0] = xfrm_policy_lookup(net, fl, family, dir, if_id); 2940 err = xfrm_expand_policies(fl, family, pols, 2941 &num_pols, &num_xfrms); 2942 if (err < 0) 2943 goto inc_error; 2944 if (num_pols == 0) 2945 return NULL; 2946 if (num_xfrms <= 0) 2947 goto make_dummy_bundle; 2948 2949 xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, 2950 xflo->dst_orig); 2951 if (IS_ERR(xdst)) 
{
		err = PTR_ERR(xdst);
		if (err == -EREMOTE) {
			xfrm_pols_put(pols, num_pols);
			return NULL;
		}

		if (err != -EAGAIN)
			goto error;
		goto make_dummy_bundle;
	} else if (xdst == NULL) {
		num_xfrms = 0;
		goto make_dummy_bundle;
	}

	return xdst;

make_dummy_bundle:
	/* We found policies, but there are no bundles to instantiate:
	 * either because the policy blocks, has no transformations, or
	 * we could not build a template (no xfrm_states). */
	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
	if (IS_ERR(xdst)) {
		xfrm_pols_put(pols, num_pols);
		return ERR_CAST(xdst);
	}
	xdst->num_pols = num_pols;
	xdst->num_xfrms = num_xfrms;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);

	return xdst;

inc_error:
	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
error:
	xfrm_pols_put(pols, num_pols);
	return ERR_PTR(err);
}

static struct dst_entry *make_blackhole(struct net *net, u16 family,
					struct dst_entry *dst_orig)
{
	const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_entry *ret;

	if (!afinfo) {
		dst_release(dst_orig);
		return ERR_PTR(-EINVAL);
	} else {
		ret = afinfo->blackhole_route(net, dst_orig);
	}
	rcu_read_unlock();

	return ret;
}

/* Finds/creates a bundle for a given flow and if_id
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 *
 * xfrm_lookup uses an if_id of 0 by default, and is provided for
 * compatibility.
 */
struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
					struct dst_entry *dst_orig,
					const struct flowi *fl,
					const struct sock *sk,
					int flags, u32 if_id)
{
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	struct xfrm_dst *xdst;
	struct dst_entry *dst, *route;
	u16 family = dst_orig->ops->family;
	u8 dir = XFRM_POLICY_OUT;
	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;

	dst = NULL;
	xdst = NULL;
	route = NULL;

	sk = sk_const_to_full_sk(sk);
	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
		num_pols = 1;
		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family,
						if_id);
		err = xfrm_expand_policies(fl, family, pols,
					   &num_pols, &num_xfrms);
		if (err < 0)
			goto dropdst;

		if (num_pols) {
			if (num_xfrms <= 0) {
				drop_pols = num_pols;
				goto no_transform;
			}

			xdst = xfrm_resolve_and_create_bundle(
					pols, num_pols, fl,
					family, dst_orig);

			if (IS_ERR(xdst)) {
				xfrm_pols_put(pols, num_pols);
				err = PTR_ERR(xdst);
				if (err == -EREMOTE)
					goto nopol;

				goto dropdst;
			} else if (xdst == NULL) {
				num_xfrms = 0;
				drop_pols = num_pols;
				goto no_transform;
			}

			route = xdst->route;
		}
	}

	if (xdst == NULL) {
		struct xfrm_flo xflo;

		xflo.dst_orig = dst_orig;
		xflo.flags = flags;

		/* To accelerate a bit...
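		 * Skip the policy lookup entirely when the original route
		 * opted out (DST_NOXFRM) or when this netns has no output
		 * policies installed at all.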
*/ 3076 if ((dst_orig->flags & DST_NOXFRM) || 3077 !net->xfrm.policy_count[XFRM_POLICY_OUT]) 3078 goto nopol; 3079 3080 xdst = xfrm_bundle_lookup(net, fl, family, dir, &xflo, if_id); 3081 if (xdst == NULL) 3082 goto nopol; 3083 if (IS_ERR(xdst)) { 3084 err = PTR_ERR(xdst); 3085 goto dropdst; 3086 } 3087 3088 num_pols = xdst->num_pols; 3089 num_xfrms = xdst->num_xfrms; 3090 memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols); 3091 route = xdst->route; 3092 } 3093 3094 dst = &xdst->u.dst; 3095 if (route == NULL && num_xfrms > 0) { 3096 /* The only case when xfrm_bundle_lookup() returns a 3097 * bundle with null route, is when the template could 3098 * not be resolved. It means policies are there, but 3099 * bundle could not be created, since we don't yet 3100 * have the xfrm_state's. We need to wait for KM to 3101 * negotiate new SA's or bail out with error.*/ 3102 if (net->xfrm.sysctl_larval_drop) { 3103 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); 3104 err = -EREMOTE; 3105 goto error; 3106 } 3107 3108 err = -EAGAIN; 3109 3110 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); 3111 goto error; 3112 } 3113 3114 no_transform: 3115 if (num_pols == 0) 3116 goto nopol; 3117 3118 if ((flags & XFRM_LOOKUP_ICMP) && 3119 !(pols[0]->flags & XFRM_POLICY_ICMP)) { 3120 err = -ENOENT; 3121 goto error; 3122 } 3123 3124 for (i = 0; i < num_pols; i++) 3125 pols[i]->curlft.use_time = ktime_get_real_seconds(); 3126 3127 if (num_xfrms < 0) { 3128 /* Prohibit the flow */ 3129 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK); 3130 err = -EPERM; 3131 goto error; 3132 } else if (num_xfrms > 0) { 3133 /* Flow transformed */ 3134 dst_release(dst_orig); 3135 } else { 3136 /* Flow passes untransformed */ 3137 dst_release(dst); 3138 dst = dst_orig; 3139 } 3140 ok: 3141 xfrm_pols_put(pols, drop_pols); 3142 if (dst && dst->xfrm && 3143 dst->xfrm->props.mode == XFRM_MODE_TUNNEL) 3144 dst->flags |= DST_XFRM_TUNNEL; 3145 return dst; 3146 3147 nopol: 3148 if (!(flags & XFRM_LOOKUP_ICMP)) { 3149 dst = dst_orig; 3150 goto ok; 3151 } 3152 err = -ENOENT; 3153 error: 3154 dst_release(dst); 3155 dropdst: 3156 if (!(flags & XFRM_LOOKUP_KEEP_DST_REF)) 3157 dst_release(dst_orig); 3158 xfrm_pols_put(pols, drop_pols); 3159 return ERR_PTR(err); 3160 } 3161 EXPORT_SYMBOL(xfrm_lookup_with_ifid); 3162 3163 /* Main function: finds/creates a bundle for given flow. 3164 * 3165 * At the moment we eat a raw IP route. Mostly to speed up lookups 3166 * on interfaces with disabled IPsec. 3167 */ 3168 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, 3169 const struct flowi *fl, const struct sock *sk, 3170 int flags) 3171 { 3172 return xfrm_lookup_with_ifid(net, dst_orig, fl, sk, flags, 0); 3173 } 3174 EXPORT_SYMBOL(xfrm_lookup); 3175 3176 /* Callers of xfrm_lookup_route() must ensure a call to dst_output(). 3177 * Otherwise we may send out blackholed packets. 
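 *
 * A typical caller therefore looks roughly like this (an illustrative
 * sketch only; surrounding names and error handling are elided):
 *
 *	dst = xfrm_lookup_route(net, &rt->dst, &fl, sk, 0);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);
 *	skb_dst_set(skb, dst);
 *	dst_output(net, sk, skb);	// a blackhole dst consumes the skb here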
3178 */ 3179 struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig, 3180 const struct flowi *fl, 3181 const struct sock *sk, int flags) 3182 { 3183 struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk, 3184 flags | XFRM_LOOKUP_QUEUE | 3185 XFRM_LOOKUP_KEEP_DST_REF); 3186 3187 if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE) 3188 return make_blackhole(net, dst_orig->ops->family, dst_orig); 3189 3190 if (IS_ERR(dst)) 3191 dst_release(dst_orig); 3192 3193 return dst; 3194 } 3195 EXPORT_SYMBOL(xfrm_lookup_route); 3196 3197 static inline int 3198 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl) 3199 { 3200 struct sec_path *sp = skb_sec_path(skb); 3201 struct xfrm_state *x; 3202 3203 if (!sp || idx < 0 || idx >= sp->len) 3204 return 0; 3205 x = sp->xvec[idx]; 3206 if (!x->type->reject) 3207 return 0; 3208 return x->type->reject(x, skb, fl); 3209 } 3210 3211 /* When skb is transformed back to its "native" form, we have to 3212 * check policy restrictions. At the moment we make this in maximally 3213 * stupid way. Shame on me. :-) Of course, connected sockets must 3214 * have policy cached at them. 3215 */ 3216 3217 static inline int 3218 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, 3219 unsigned short family) 3220 { 3221 if (xfrm_state_kern(x)) 3222 return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family); 3223 return x->id.proto == tmpl->id.proto && 3224 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) && 3225 (x->props.reqid == tmpl->reqid || !tmpl->reqid) && 3226 x->props.mode == tmpl->mode && 3227 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) || 3228 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) && 3229 !(x->props.mode != XFRM_MODE_TRANSPORT && 3230 xfrm_state_addr_cmp(tmpl, x, family)); 3231 } 3232 3233 /* 3234 * 0 or more than 0 is returned when validation is succeeded (either bypass 3235 * because of optional transport mode, or next index of the mathced secpath 3236 * state with the template. 3237 * -1 is returned when no matching template is found. 3238 * Otherwise "-2 - errored_index" is returned. 
3239 */ 3240 static inline int 3241 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start, 3242 unsigned short family) 3243 { 3244 int idx = start; 3245 3246 if (tmpl->optional) { 3247 if (tmpl->mode == XFRM_MODE_TRANSPORT) 3248 return start; 3249 } else 3250 start = -1; 3251 for (; idx < sp->len; idx++) { 3252 if (xfrm_state_ok(tmpl, sp->xvec[idx], family)) 3253 return ++idx; 3254 if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) { 3255 if (start == -1) 3256 start = -2-idx; 3257 break; 3258 } 3259 } 3260 return start; 3261 } 3262 3263 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, 3264 unsigned int family, int reverse) 3265 { 3266 const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 3267 int err; 3268 3269 if (unlikely(afinfo == NULL)) 3270 return -EAFNOSUPPORT; 3271 3272 afinfo->decode_session(skb, fl, reverse); 3273 3274 err = security_xfrm_decode_session(skb, &fl->flowi_secid); 3275 rcu_read_unlock(); 3276 return err; 3277 } 3278 EXPORT_SYMBOL(__xfrm_decode_session); 3279 3280 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp) 3281 { 3282 for (; k < sp->len; k++) { 3283 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) { 3284 *idxp = k; 3285 return 1; 3286 } 3287 } 3288 3289 return 0; 3290 } 3291 3292 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, 3293 unsigned short family) 3294 { 3295 struct net *net = dev_net(skb->dev); 3296 struct xfrm_policy *pol; 3297 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 3298 int npols = 0; 3299 int xfrm_nr; 3300 int pi; 3301 int reverse; 3302 struct flowi fl; 3303 int xerr_idx = -1; 3304 const struct xfrm_if_cb *ifcb; 3305 struct sec_path *sp; 3306 struct xfrm_if *xi; 3307 u32 if_id = 0; 3308 3309 rcu_read_lock(); 3310 ifcb = xfrm_if_get_cb(); 3311 3312 if (ifcb) { 3313 xi = ifcb->decode_session(skb); 3314 if (xi) 3315 if_id = xi->p.if_id; 3316 } 3317 rcu_read_unlock(); 3318 3319 reverse = dir & ~XFRM_POLICY_MASK; 3320 dir &= XFRM_POLICY_MASK; 3321 3322 if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) { 3323 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR); 3324 return 0; 3325 } 3326 3327 nf_nat_decode_session(skb, &fl, family); 3328 3329 /* First, check used SA against their selectors. 
*/ 3330 sp = skb_sec_path(skb); 3331 if (sp) { 3332 int i; 3333 3334 for (i = sp->len - 1; i >= 0; i--) { 3335 struct xfrm_state *x = sp->xvec[i]; 3336 if (!xfrm_selector_match(&x->sel, &fl, family)) { 3337 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH); 3338 return 0; 3339 } 3340 } 3341 } 3342 3343 pol = NULL; 3344 sk = sk_to_full_sk(sk); 3345 if (sk && sk->sk_policy[dir]) { 3346 pol = xfrm_sk_policy_lookup(sk, dir, &fl, family, if_id); 3347 if (IS_ERR(pol)) { 3348 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 3349 return 0; 3350 } 3351 } 3352 3353 if (!pol) 3354 pol = xfrm_policy_lookup(net, &fl, family, dir, if_id); 3355 3356 if (IS_ERR(pol)) { 3357 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 3358 return 0; 3359 } 3360 3361 if (!pol) { 3362 if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) { 3363 xfrm_secpath_reject(xerr_idx, skb, &fl); 3364 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS); 3365 return 0; 3366 } 3367 return 1; 3368 } 3369 3370 pol->curlft.use_time = ktime_get_real_seconds(); 3371 3372 pols[0] = pol; 3373 npols++; 3374 #ifdef CONFIG_XFRM_SUB_POLICY 3375 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) { 3376 pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, 3377 &fl, family, 3378 XFRM_POLICY_IN, if_id); 3379 if (pols[1]) { 3380 if (IS_ERR(pols[1])) { 3381 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 3382 return 0; 3383 } 3384 pols[1]->curlft.use_time = ktime_get_real_seconds(); 3385 npols++; 3386 } 3387 } 3388 #endif 3389 3390 if (pol->action == XFRM_POLICY_ALLOW) { 3391 static struct sec_path dummy; 3392 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH]; 3393 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH]; 3394 struct xfrm_tmpl **tpp = tp; 3395 int ti = 0; 3396 int i, k; 3397 3398 sp = skb_sec_path(skb); 3399 if (!sp) 3400 sp = &dummy; 3401 3402 for (pi = 0; pi < npols; pi++) { 3403 if (pols[pi] != pol && 3404 pols[pi]->action != XFRM_POLICY_ALLOW) { 3405 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK); 3406 goto reject; 3407 } 3408 if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) { 3409 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR); 3410 goto reject_error; 3411 } 3412 for (i = 0; i < pols[pi]->xfrm_nr; i++) 3413 tpp[ti++] = &pols[pi]->xfrm_vec[i]; 3414 } 3415 xfrm_nr = ti; 3416 if (npols > 1) { 3417 xfrm_tmpl_sort(stp, tpp, xfrm_nr, family, net); 3418 tpp = stp; 3419 } 3420 3421 /* For each tunnel xfrm, find the first matching tmpl. 3422 * For each tmpl before that, find corresponding xfrm. 3423 * Order is _important_. Later we will implement 3424 * some barriers, but at the moment barriers 3425 * are implied between each two transformations. 
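		 *
		 * (In the loop below, i walks the templates from last to
		 * first while k only advances through the secpath, so
		 * out-of-order transformations cannot match.)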
3426 */ 3427 for (i = xfrm_nr-1, k = 0; i >= 0; i--) { 3428 k = xfrm_policy_ok(tpp[i], sp, k, family); 3429 if (k < 0) { 3430 if (k < -1) 3431 /* "-2 - errored_index" returned */ 3432 xerr_idx = -(2+k); 3433 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH); 3434 goto reject; 3435 } 3436 } 3437 3438 if (secpath_has_nontransport(sp, k, &xerr_idx)) { 3439 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH); 3440 goto reject; 3441 } 3442 3443 xfrm_pols_put(pols, npols); 3444 return 1; 3445 } 3446 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK); 3447 3448 reject: 3449 xfrm_secpath_reject(xerr_idx, skb, &fl); 3450 reject_error: 3451 xfrm_pols_put(pols, npols); 3452 return 0; 3453 } 3454 EXPORT_SYMBOL(__xfrm_policy_check); 3455 3456 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family) 3457 { 3458 struct net *net = dev_net(skb->dev); 3459 struct flowi fl; 3460 struct dst_entry *dst; 3461 int res = 1; 3462 3463 if (xfrm_decode_session(skb, &fl, family) < 0) { 3464 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR); 3465 return 0; 3466 } 3467 3468 skb_dst_force(skb); 3469 if (!skb_dst(skb)) { 3470 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR); 3471 return 0; 3472 } 3473 3474 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE); 3475 if (IS_ERR(dst)) { 3476 res = 0; 3477 dst = NULL; 3478 } 3479 skb_dst_set(skb, dst); 3480 return res; 3481 } 3482 EXPORT_SYMBOL(__xfrm_route_forward); 3483 3484 /* Optimize later using cookies and generation ids. */ 3485 3486 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie) 3487 { 3488 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete 3489 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to 3490 * get validated by dst_ops->check on every use. We do this 3491 * because when a normal route referenced by an XFRM dst is 3492 * obsoleted we do not go looking around for all parent 3493 * referencing XFRM dsts so that we can invalidate them. It 3494 * is just too much work. Instead we make the checks here on 3495 * every use. For example: 3496 * 3497 * XFRM dst A --> IPv4 dst X 3498 * 3499 * X is the "xdst->route" of A (X is also the "dst->path" of A 3500 * in this example). If X is marked obsolete, "A" will not 3501 * notice. That's what we are validating here via the 3502 * stale_bundle() check. 3503 * 3504 * When a dst is removed from the fib tree, DST_OBSOLETE_DEAD will 3505 * be marked on it. 3506 * This will force stale_bundle() to fail on any xdst bundle with 3507 * this dst linked in it. 3508 */ 3509 if (dst->obsolete < 0 && !stale_bundle(dst)) 3510 return dst; 3511 3512 return NULL; 3513 } 3514 3515 static int stale_bundle(struct dst_entry *dst) 3516 { 3517 return !xfrm_bundle_ok((struct xfrm_dst *)dst); 3518 } 3519 3520 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev) 3521 { 3522 while ((dst = xfrm_dst_child(dst)) && dst->xfrm && dst->dev == dev) { 3523 dst->dev = dev_net(dev)->loopback_dev; 3524 dev_hold(dst->dev); 3525 dev_put(dev); 3526 } 3527 } 3528 EXPORT_SYMBOL(xfrm_dst_ifdown); 3529 3530 static void xfrm_link_failure(struct sk_buff *skb) 3531 { 3532 /* Impossible. Such dst must be popped before reaches point of failure. 
*/ 3533 } 3534 3535 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst) 3536 { 3537 if (dst) { 3538 if (dst->obsolete) { 3539 dst_release(dst); 3540 dst = NULL; 3541 } 3542 } 3543 return dst; 3544 } 3545 3546 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr) 3547 { 3548 while (nr--) { 3549 struct xfrm_dst *xdst = bundle[nr]; 3550 u32 pmtu, route_mtu_cached; 3551 struct dst_entry *dst; 3552 3553 dst = &xdst->u.dst; 3554 pmtu = dst_mtu(xfrm_dst_child(dst)); 3555 xdst->child_mtu_cached = pmtu; 3556 3557 pmtu = xfrm_state_mtu(dst->xfrm, pmtu); 3558 3559 route_mtu_cached = dst_mtu(xdst->route); 3560 xdst->route_mtu_cached = route_mtu_cached; 3561 3562 if (pmtu > route_mtu_cached) 3563 pmtu = route_mtu_cached; 3564 3565 dst_metric_set(dst, RTAX_MTU, pmtu); 3566 } 3567 } 3568 3569 /* Check that the bundle accepts the flow and its components are 3570 * still valid. 3571 */ 3572 3573 static int xfrm_bundle_ok(struct xfrm_dst *first) 3574 { 3575 struct xfrm_dst *bundle[XFRM_MAX_DEPTH]; 3576 struct dst_entry *dst = &first->u.dst; 3577 struct xfrm_dst *xdst; 3578 int start_from, nr; 3579 u32 mtu; 3580 3581 if (!dst_check(xfrm_dst_path(dst), ((struct xfrm_dst *)dst)->path_cookie) || 3582 (dst->dev && !netif_running(dst->dev))) 3583 return 0; 3584 3585 if (dst->flags & DST_XFRM_QUEUE) 3586 return 1; 3587 3588 start_from = nr = 0; 3589 do { 3590 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 3591 3592 if (dst->xfrm->km.state != XFRM_STATE_VALID) 3593 return 0; 3594 if (xdst->xfrm_genid != dst->xfrm->genid) 3595 return 0; 3596 if (xdst->num_pols > 0 && 3597 xdst->policy_genid != atomic_read(&xdst->pols[0]->genid)) 3598 return 0; 3599 3600 bundle[nr++] = xdst; 3601 3602 mtu = dst_mtu(xfrm_dst_child(dst)); 3603 if (xdst->child_mtu_cached != mtu) { 3604 start_from = nr; 3605 xdst->child_mtu_cached = mtu; 3606 } 3607 3608 if (!dst_check(xdst->route, xdst->route_cookie)) 3609 return 0; 3610 mtu = dst_mtu(xdst->route); 3611 if (xdst->route_mtu_cached != mtu) { 3612 start_from = nr; 3613 xdst->route_mtu_cached = mtu; 3614 } 3615 3616 dst = xfrm_dst_child(dst); 3617 } while (dst->xfrm); 3618 3619 if (likely(!start_from)) 3620 return 1; 3621 3622 xdst = bundle[start_from - 1]; 3623 mtu = xdst->child_mtu_cached; 3624 while (start_from--) { 3625 dst = &xdst->u.dst; 3626 3627 mtu = xfrm_state_mtu(dst->xfrm, mtu); 3628 if (mtu > xdst->route_mtu_cached) 3629 mtu = xdst->route_mtu_cached; 3630 dst_metric_set(dst, RTAX_MTU, mtu); 3631 if (!start_from) 3632 break; 3633 3634 xdst = bundle[start_from - 1]; 3635 xdst->child_mtu_cached = mtu; 3636 } 3637 3638 return 1; 3639 } 3640 3641 static unsigned int xfrm_default_advmss(const struct dst_entry *dst) 3642 { 3643 return dst_metric_advmss(xfrm_dst_path(dst)); 3644 } 3645 3646 static unsigned int xfrm_mtu(const struct dst_entry *dst) 3647 { 3648 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); 3649 3650 return mtu ? 
: dst_mtu(xfrm_dst_path(dst)); 3651 } 3652 3653 static const void *xfrm_get_dst_nexthop(const struct dst_entry *dst, 3654 const void *daddr) 3655 { 3656 while (dst->xfrm) { 3657 const struct xfrm_state *xfrm = dst->xfrm; 3658 3659 dst = xfrm_dst_child(dst); 3660 3661 if (xfrm->props.mode == XFRM_MODE_TRANSPORT) 3662 continue; 3663 if (xfrm->type->flags & XFRM_TYPE_REMOTE_COADDR) 3664 daddr = xfrm->coaddr; 3665 else if (!(xfrm->type->flags & XFRM_TYPE_LOCAL_COADDR)) 3666 daddr = &xfrm->id.daddr; 3667 } 3668 return daddr; 3669 } 3670 3671 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, 3672 struct sk_buff *skb, 3673 const void *daddr) 3674 { 3675 const struct dst_entry *path = xfrm_dst_path(dst); 3676 3677 if (!skb) 3678 daddr = xfrm_get_dst_nexthop(dst, daddr); 3679 return path->ops->neigh_lookup(path, skb, daddr); 3680 } 3681 3682 static void xfrm_confirm_neigh(const struct dst_entry *dst, const void *daddr) 3683 { 3684 const struct dst_entry *path = xfrm_dst_path(dst); 3685 3686 daddr = xfrm_get_dst_nexthop(dst, daddr); 3687 path->ops->confirm_neigh(path, daddr); 3688 } 3689 3690 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family) 3691 { 3692 int err = 0; 3693 3694 if (WARN_ON(family >= ARRAY_SIZE(xfrm_policy_afinfo))) 3695 return -EAFNOSUPPORT; 3696 3697 spin_lock(&xfrm_policy_afinfo_lock); 3698 if (unlikely(xfrm_policy_afinfo[family] != NULL)) 3699 err = -EEXIST; 3700 else { 3701 struct dst_ops *dst_ops = afinfo->dst_ops; 3702 if (likely(dst_ops->kmem_cachep == NULL)) 3703 dst_ops->kmem_cachep = xfrm_dst_cache; 3704 if (likely(dst_ops->check == NULL)) 3705 dst_ops->check = xfrm_dst_check; 3706 if (likely(dst_ops->default_advmss == NULL)) 3707 dst_ops->default_advmss = xfrm_default_advmss; 3708 if (likely(dst_ops->mtu == NULL)) 3709 dst_ops->mtu = xfrm_mtu; 3710 if (likely(dst_ops->negative_advice == NULL)) 3711 dst_ops->negative_advice = xfrm_negative_advice; 3712 if (likely(dst_ops->link_failure == NULL)) 3713 dst_ops->link_failure = xfrm_link_failure; 3714 if (likely(dst_ops->neigh_lookup == NULL)) 3715 dst_ops->neigh_lookup = xfrm_neigh_lookup; 3716 if (likely(!dst_ops->confirm_neigh)) 3717 dst_ops->confirm_neigh = xfrm_confirm_neigh; 3718 rcu_assign_pointer(xfrm_policy_afinfo[family], afinfo); 3719 } 3720 spin_unlock(&xfrm_policy_afinfo_lock); 3721 3722 return err; 3723 } 3724 EXPORT_SYMBOL(xfrm_policy_register_afinfo); 3725 3726 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo) 3727 { 3728 struct dst_ops *dst_ops = afinfo->dst_ops; 3729 int i; 3730 3731 for (i = 0; i < ARRAY_SIZE(xfrm_policy_afinfo); i++) { 3732 if (xfrm_policy_afinfo[i] != afinfo) 3733 continue; 3734 RCU_INIT_POINTER(xfrm_policy_afinfo[i], NULL); 3735 break; 3736 } 3737 3738 synchronize_rcu(); 3739 3740 dst_ops->kmem_cachep = NULL; 3741 dst_ops->check = NULL; 3742 dst_ops->negative_advice = NULL; 3743 dst_ops->link_failure = NULL; 3744 } 3745 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); 3746 3747 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb) 3748 { 3749 spin_lock(&xfrm_if_cb_lock); 3750 rcu_assign_pointer(xfrm_if_cb, ifcb); 3751 spin_unlock(&xfrm_if_cb_lock); 3752 } 3753 EXPORT_SYMBOL(xfrm_if_register_cb); 3754 3755 void xfrm_if_unregister_cb(void) 3756 { 3757 RCU_INIT_POINTER(xfrm_if_cb, NULL); 3758 synchronize_rcu(); 3759 } 3760 EXPORT_SYMBOL(xfrm_if_unregister_cb); 3761 3762 #ifdef CONFIG_XFRM_STATISTICS 3763 static int __net_init xfrm_statistics_init(struct net *net) 3764 { 3765 int rv; 3766 net->mib.xfrm_statistics = 
alloc_percpu(struct linux_xfrm_mib); 3767 if (!net->mib.xfrm_statistics) 3768 return -ENOMEM; 3769 rv = xfrm_proc_init(net); 3770 if (rv < 0) 3771 free_percpu(net->mib.xfrm_statistics); 3772 return rv; 3773 } 3774 3775 static void xfrm_statistics_fini(struct net *net) 3776 { 3777 xfrm_proc_fini(net); 3778 free_percpu(net->mib.xfrm_statistics); 3779 } 3780 #else 3781 static int __net_init xfrm_statistics_init(struct net *net) 3782 { 3783 return 0; 3784 } 3785 3786 static void xfrm_statistics_fini(struct net *net) 3787 { 3788 } 3789 #endif 3790 3791 static int __net_init xfrm_policy_init(struct net *net) 3792 { 3793 unsigned int hmask, sz; 3794 int dir, err; 3795 3796 if (net_eq(net, &init_net)) { 3797 xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache", 3798 sizeof(struct xfrm_dst), 3799 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 3800 NULL); 3801 err = rhashtable_init(&xfrm_policy_inexact_table, 3802 &xfrm_pol_inexact_params); 3803 BUG_ON(err); 3804 } 3805 3806 hmask = 8 - 1; 3807 sz = (hmask+1) * sizeof(struct hlist_head); 3808 3809 net->xfrm.policy_byidx = xfrm_hash_alloc(sz); 3810 if (!net->xfrm.policy_byidx) 3811 goto out_byidx; 3812 net->xfrm.policy_idx_hmask = hmask; 3813 3814 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { 3815 struct xfrm_policy_hash *htab; 3816 3817 net->xfrm.policy_count[dir] = 0; 3818 net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0; 3819 INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]); 3820 3821 htab = &net->xfrm.policy_bydst[dir]; 3822 htab->table = xfrm_hash_alloc(sz); 3823 if (!htab->table) 3824 goto out_bydst; 3825 htab->hmask = hmask; 3826 htab->dbits4 = 32; 3827 htab->sbits4 = 32; 3828 htab->dbits6 = 128; 3829 htab->sbits6 = 128; 3830 } 3831 net->xfrm.policy_hthresh.lbits4 = 32; 3832 net->xfrm.policy_hthresh.rbits4 = 32; 3833 net->xfrm.policy_hthresh.lbits6 = 128; 3834 net->xfrm.policy_hthresh.rbits6 = 128; 3835 3836 seqlock_init(&net->xfrm.policy_hthresh.lock); 3837 3838 INIT_LIST_HEAD(&net->xfrm.policy_all); 3839 INIT_LIST_HEAD(&net->xfrm.inexact_bins); 3840 INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize); 3841 INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild); 3842 return 0; 3843 3844 out_bydst: 3845 for (dir--; dir >= 0; dir--) { 3846 struct xfrm_policy_hash *htab; 3847 3848 htab = &net->xfrm.policy_bydst[dir]; 3849 xfrm_hash_free(htab->table, sz); 3850 } 3851 xfrm_hash_free(net->xfrm.policy_byidx, sz); 3852 out_byidx: 3853 return -ENOMEM; 3854 } 3855 3856 static void xfrm_policy_fini(struct net *net) 3857 { 3858 struct xfrm_pol_inexact_bin *b, *t; 3859 unsigned int sz; 3860 int dir; 3861 3862 flush_work(&net->xfrm.policy_hash_work); 3863 #ifdef CONFIG_XFRM_SUB_POLICY 3864 xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false); 3865 #endif 3866 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false); 3867 3868 WARN_ON(!list_empty(&net->xfrm.policy_all)); 3869 3870 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { 3871 struct xfrm_policy_hash *htab; 3872 3873 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir])); 3874 3875 htab = &net->xfrm.policy_bydst[dir]; 3876 sz = (htab->hmask + 1) * sizeof(struct hlist_head); 3877 WARN_ON(!hlist_empty(htab->table)); 3878 xfrm_hash_free(htab->table, sz); 3879 } 3880 3881 sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head); 3882 WARN_ON(!hlist_empty(net->xfrm.policy_byidx)); 3883 xfrm_hash_free(net->xfrm.policy_byidx, sz); 3884 3885 spin_lock_bh(&net->xfrm.xfrm_policy_lock); 3886 list_for_each_entry_safe(b, t, &net->xfrm.inexact_bins, inexact_bins) 3887 __xfrm_policy_inexact_prune_bin(b, true); 3888 
spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 3889 } 3890 3891 static int __net_init xfrm_net_init(struct net *net) 3892 { 3893 int rv; 3894 3895 /* Initialize the per-net locks here */ 3896 spin_lock_init(&net->xfrm.xfrm_state_lock); 3897 spin_lock_init(&net->xfrm.xfrm_policy_lock); 3898 mutex_init(&net->xfrm.xfrm_cfg_mutex); 3899 3900 rv = xfrm_statistics_init(net); 3901 if (rv < 0) 3902 goto out_statistics; 3903 rv = xfrm_state_init(net); 3904 if (rv < 0) 3905 goto out_state; 3906 rv = xfrm_policy_init(net); 3907 if (rv < 0) 3908 goto out_policy; 3909 rv = xfrm_sysctl_init(net); 3910 if (rv < 0) 3911 goto out_sysctl; 3912 3913 return 0; 3914 3915 out_sysctl: 3916 xfrm_policy_fini(net); 3917 out_policy: 3918 xfrm_state_fini(net); 3919 out_state: 3920 xfrm_statistics_fini(net); 3921 out_statistics: 3922 return rv; 3923 } 3924 3925 static void __net_exit xfrm_net_exit(struct net *net) 3926 { 3927 xfrm_sysctl_fini(net); 3928 xfrm_policy_fini(net); 3929 xfrm_state_fini(net); 3930 xfrm_statistics_fini(net); 3931 } 3932 3933 static struct pernet_operations __net_initdata xfrm_net_ops = { 3934 .init = xfrm_net_init, 3935 .exit = xfrm_net_exit, 3936 }; 3937 3938 void __init xfrm_init(void) 3939 { 3940 register_pernet_subsys(&xfrm_net_ops); 3941 xfrm_dev_init(); 3942 seqcount_init(&xfrm_policy_hash_generation); 3943 xfrm_input_init(); 3944 3945 RCU_INIT_POINTER(xfrm_if_cb, NULL); 3946 synchronize_rcu(); 3947 } 3948 3949 #ifdef CONFIG_AUDITSYSCALL 3950 static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp, 3951 struct audit_buffer *audit_buf) 3952 { 3953 struct xfrm_sec_ctx *ctx = xp->security; 3954 struct xfrm_selector *sel = &xp->selector; 3955 3956 if (ctx) 3957 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s", 3958 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str); 3959 3960 switch (sel->family) { 3961 case AF_INET: 3962 audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4); 3963 if (sel->prefixlen_s != 32) 3964 audit_log_format(audit_buf, " src_prefixlen=%d", 3965 sel->prefixlen_s); 3966 audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4); 3967 if (sel->prefixlen_d != 32) 3968 audit_log_format(audit_buf, " dst_prefixlen=%d", 3969 sel->prefixlen_d); 3970 break; 3971 case AF_INET6: 3972 audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6); 3973 if (sel->prefixlen_s != 128) 3974 audit_log_format(audit_buf, " src_prefixlen=%d", 3975 sel->prefixlen_s); 3976 audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6); 3977 if (sel->prefixlen_d != 128) 3978 audit_log_format(audit_buf, " dst_prefixlen=%d", 3979 sel->prefixlen_d); 3980 break; 3981 } 3982 } 3983 3984 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid) 3985 { 3986 struct audit_buffer *audit_buf; 3987 3988 audit_buf = xfrm_audit_start("SPD-add"); 3989 if (audit_buf == NULL) 3990 return; 3991 xfrm_audit_helper_usrinfo(task_valid, audit_buf); 3992 audit_log_format(audit_buf, " res=%u", result); 3993 xfrm_audit_common_policyinfo(xp, audit_buf); 3994 audit_log_end(audit_buf); 3995 } 3996 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add); 3997 3998 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, 3999 bool task_valid) 4000 { 4001 struct audit_buffer *audit_buf; 4002 4003 audit_buf = xfrm_audit_start("SPD-delete"); 4004 if (audit_buf == NULL) 4005 return; 4006 xfrm_audit_helper_usrinfo(task_valid, audit_buf); 4007 audit_log_format(audit_buf, " res=%u", result); 4008 xfrm_audit_common_policyinfo(xp, audit_buf); 4009 audit_log_end(audit_buf); 4010 } 4011 
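
/* A record emitted above looks roughly like the following (an
 * illustrative sketch; the exact fields depend on the configured
 * security context and selector family):
 *
 *   op=SPD-delete res=1 src=10.0.0.0 src_prefixlen=8 dst=10.1.0.0 dst_prefixlen=24
 */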
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete); 4012 #endif 4013 4014 #ifdef CONFIG_XFRM_MIGRATE 4015 static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp, 4016 const struct xfrm_selector *sel_tgt) 4017 { 4018 if (sel_cmp->proto == IPSEC_ULPROTO_ANY) { 4019 if (sel_tgt->family == sel_cmp->family && 4020 xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr, 4021 sel_cmp->family) && 4022 xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr, 4023 sel_cmp->family) && 4024 sel_tgt->prefixlen_d == sel_cmp->prefixlen_d && 4025 sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) { 4026 return true; 4027 } 4028 } else { 4029 if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) { 4030 return true; 4031 } 4032 } 4033 return false; 4034 } 4035 4036 static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel, 4037 u8 dir, u8 type, struct net *net) 4038 { 4039 struct xfrm_policy *pol, *ret = NULL; 4040 struct hlist_head *chain; 4041 u32 priority = ~0U; 4042 4043 spin_lock_bh(&net->xfrm.xfrm_policy_lock); 4044 chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir); 4045 hlist_for_each_entry(pol, chain, bydst) { 4046 if (xfrm_migrate_selector_match(sel, &pol->selector) && 4047 pol->type == type) { 4048 ret = pol; 4049 priority = ret->priority; 4050 break; 4051 } 4052 } 4053 chain = &net->xfrm.policy_inexact[dir]; 4054 hlist_for_each_entry(pol, chain, bydst_inexact_list) { 4055 if ((pol->priority >= priority) && ret) 4056 break; 4057 4058 if (xfrm_migrate_selector_match(sel, &pol->selector) && 4059 pol->type == type) { 4060 ret = pol; 4061 break; 4062 } 4063 } 4064 4065 xfrm_pol_hold(ret); 4066 4067 spin_unlock_bh(&net->xfrm.xfrm_policy_lock); 4068 4069 return ret; 4070 } 4071 4072 static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t) 4073 { 4074 int match = 0; 4075 4076 if (t->mode == m->mode && t->id.proto == m->proto && 4077 (m->reqid == 0 || t->reqid == m->reqid)) { 4078 switch (t->mode) { 4079 case XFRM_MODE_TUNNEL: 4080 case XFRM_MODE_BEET: 4081 if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr, 4082 m->old_family) && 4083 xfrm_addr_equal(&t->saddr, &m->old_saddr, 4084 m->old_family)) { 4085 match = 1; 4086 } 4087 break; 4088 case XFRM_MODE_TRANSPORT: 4089 /* in case of transport mode, template does not store 4090 any IP addresses, hence we just compare mode and 4091 protocol */ 4092 match = 1; 4093 break; 4094 default: 4095 break; 4096 } 4097 } 4098 return match; 4099 } 4100 4101 /* update endpoint address(es) of template(s) */ 4102 static int xfrm_policy_migrate(struct xfrm_policy *pol, 4103 struct xfrm_migrate *m, int num_migrate) 4104 { 4105 struct xfrm_migrate *mp; 4106 int i, j, n = 0; 4107 4108 write_lock_bh(&pol->lock); 4109 if (unlikely(pol->walk.dead)) { 4110 /* target policy has been deleted */ 4111 write_unlock_bh(&pol->lock); 4112 return -ENOENT; 4113 } 4114 4115 for (i = 0; i < pol->xfrm_nr; i++) { 4116 for (j = 0, mp = m; j < num_migrate; j++, mp++) { 4117 if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i])) 4118 continue; 4119 n++; 4120 if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL && 4121 pol->xfrm_vec[i].mode != XFRM_MODE_BEET) 4122 continue; 4123 /* update endpoints */ 4124 memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr, 4125 sizeof(pol->xfrm_vec[i].id.daddr)); 4126 memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr, 4127 sizeof(pol->xfrm_vec[i].saddr)); 4128 pol->xfrm_vec[i].encap_family = mp->new_family; 4129 /* flush bundles */ 4130 atomic_inc(&pol->genid); 4131 } 4132 } 4133 4134 
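	/* The genid bumps above invalidate any bundles cached for this
	 * policy: xfrm_bundle_ok() compares xdst->policy_genid against
	 * pol->genid and fails stale bundles, forcing them to be rebuilt
	 * with the updated endpoints.
	 */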
write_unlock_bh(&pol->lock); 4135 4136 if (!n) 4137 return -ENODATA; 4138 4139 return 0; 4140 } 4141 4142 static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate) 4143 { 4144 int i, j; 4145 4146 if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH) 4147 return -EINVAL; 4148 4149 for (i = 0; i < num_migrate; i++) { 4150 if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) || 4151 xfrm_addr_any(&m[i].new_saddr, m[i].new_family)) 4152 return -EINVAL; 4153 4154 /* check if there is any duplicated entry */ 4155 for (j = i + 1; j < num_migrate; j++) { 4156 if (!memcmp(&m[i].old_daddr, &m[j].old_daddr, 4157 sizeof(m[i].old_daddr)) && 4158 !memcmp(&m[i].old_saddr, &m[j].old_saddr, 4159 sizeof(m[i].old_saddr)) && 4160 m[i].proto == m[j].proto && 4161 m[i].mode == m[j].mode && 4162 m[i].reqid == m[j].reqid && 4163 m[i].old_family == m[j].old_family) 4164 return -EINVAL; 4165 } 4166 } 4167 4168 return 0; 4169 } 4170 4171 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, 4172 struct xfrm_migrate *m, int num_migrate, 4173 struct xfrm_kmaddress *k, struct net *net, 4174 struct xfrm_encap_tmpl *encap) 4175 { 4176 int i, err, nx_cur = 0, nx_new = 0; 4177 struct xfrm_policy *pol = NULL; 4178 struct xfrm_state *x, *xc; 4179 struct xfrm_state *x_cur[XFRM_MAX_DEPTH]; 4180 struct xfrm_state *x_new[XFRM_MAX_DEPTH]; 4181 struct xfrm_migrate *mp; 4182 4183 /* Stage 0 - sanity checks */ 4184 if ((err = xfrm_migrate_check(m, num_migrate)) < 0) 4185 goto out; 4186 4187 if (dir >= XFRM_POLICY_MAX) { 4188 err = -EINVAL; 4189 goto out; 4190 } 4191 4192 /* Stage 1 - find policy */ 4193 if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) { 4194 err = -ENOENT; 4195 goto out; 4196 } 4197 4198 /* Stage 2 - find and update state(s) */ 4199 for (i = 0, mp = m; i < num_migrate; i++, mp++) { 4200 if ((x = xfrm_migrate_state_find(mp, net))) { 4201 x_cur[nx_cur] = x; 4202 nx_cur++; 4203 xc = xfrm_state_migrate(x, mp, encap); 4204 if (xc) { 4205 x_new[nx_new] = xc; 4206 nx_new++; 4207 } else { 4208 err = -ENODATA; 4209 goto restore_state; 4210 } 4211 } 4212 } 4213 4214 /* Stage 3 - update policy */ 4215 if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0) 4216 goto restore_state; 4217 4218 /* Stage 4 - delete old state(s) */ 4219 if (nx_cur) { 4220 xfrm_states_put(x_cur, nx_cur); 4221 xfrm_states_delete(x_cur, nx_cur); 4222 } 4223 4224 /* Stage 5 - announce */ 4225 km_migrate(sel, dir, type, m, num_migrate, k, encap); 4226 4227 xfrm_pol_put(pol); 4228 4229 return 0; 4230 out: 4231 return err; 4232 4233 restore_state: 4234 if (pol) 4235 xfrm_pol_put(pol); 4236 if (nx_cur) 4237 xfrm_states_put(x_cur, nx_cur); 4238 if (nx_new) 4239 xfrm_states_delete(x_new, nx_new); 4240 4241 return err; 4242 } 4243 EXPORT_SYMBOL(xfrm_migrate); 4244 #endif 4245
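
/* Example (an illustrative sketch, not part of this file): migrating a
 * single tunnel-mode ESP SA pair to new endpoint addresses could fill
 * one struct xfrm_migrate and call xfrm_migrate() like so:
 *
 *	struct xfrm_migrate m = {
 *		.old_daddr  = old_daddr,
 *		.old_saddr  = old_saddr,
 *		.new_daddr  = new_daddr,
 *		.new_saddr  = new_saddr,
 *		.proto      = IPPROTO_ESP,
 *		.mode       = XFRM_MODE_TUNNEL,
 *		.old_family = AF_INET,
 *		.new_family = AF_INET,
 *	};
 *
 *	err = xfrm_migrate(&sel, XFRM_POLICY_OUT, XFRM_POLICY_TYPE_MAIN,
 *			   &m, 1, NULL, net, NULL);
 */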