1 /* 2 * xfrm_policy.c 3 * 4 * Changes: 5 * Mitsuru KANDA @USAGI 6 * Kazunori MIYAZAWA @USAGI 7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com> 8 * IPv6 support 9 * Kazunori MIYAZAWA @USAGI 10 * YOSHIFUJI Hideaki 11 * Split up af-specific portion 12 * Derek Atkins <derek@ihtfp.com> Add the post_input processor 13 * 14 */ 15 16 #include <linux/err.h> 17 #include <linux/slab.h> 18 #include <linux/kmod.h> 19 #include <linux/list.h> 20 #include <linux/spinlock.h> 21 #include <linux/workqueue.h> 22 #include <linux/notifier.h> 23 #include <linux/netdevice.h> 24 #include <linux/netfilter.h> 25 #include <linux/module.h> 26 #include <linux/cache.h> 27 #include <linux/audit.h> 28 #include <net/dst.h> 29 #include <net/xfrm.h> 30 #include <net/ip.h> 31 #ifdef CONFIG_XFRM_STATISTICS 32 #include <net/snmp.h> 33 #endif 34 35 #include "xfrm_hash.h" 36 37 DEFINE_MUTEX(xfrm_cfg_mutex); 38 EXPORT_SYMBOL(xfrm_cfg_mutex); 39 40 static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock); 41 static struct dst_entry *xfrm_policy_sk_bundles; 42 static DEFINE_RWLOCK(xfrm_policy_lock); 43 44 static DEFINE_RWLOCK(xfrm_policy_afinfo_lock); 45 static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO]; 46 47 static struct kmem_cache *xfrm_dst_cache __read_mostly; 48 49 static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family); 50 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo); 51 static void xfrm_init_pmtu(struct dst_entry *dst); 52 static int stale_bundle(struct dst_entry *dst); 53 static int xfrm_bundle_ok(struct xfrm_dst *xdst); 54 55 56 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, 57 int dir); 58 59 static inline bool 60 __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl) 61 { 62 const struct flowi4 *fl4 = &fl->u.ip4; 63 64 return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) && 65 addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) && 66 !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) && 67 !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) && 68 (fl4->flowi4_proto == sel->proto || !sel->proto) && 69 (fl4->flowi4_oif == sel->ifindex || !sel->ifindex); 70 } 71 72 static inline bool 73 __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl) 74 { 75 const struct flowi6 *fl6 = &fl->u.ip6; 76 77 return addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) && 78 addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) && 79 !((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) && 80 !((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) && 81 (fl6->flowi6_proto == sel->proto || !sel->proto) && 82 (fl6->flowi6_oif == sel->ifindex || !sel->ifindex); 83 } 84 85 bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl, 86 unsigned short family) 87 { 88 switch (family) { 89 case AF_INET: 90 return __xfrm4_selector_match(sel, fl); 91 case AF_INET6: 92 return __xfrm6_selector_match(sel, fl); 93 } 94 return false; 95 } 96 97 static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, 98 const xfrm_address_t *saddr, 99 const xfrm_address_t *daddr, 100 int family) 101 { 102 struct xfrm_policy_afinfo *afinfo; 103 struct dst_entry *dst; 104 105 afinfo = xfrm_policy_get_afinfo(family); 106 if (unlikely(afinfo == NULL)) 107 return ERR_PTR(-EAFNOSUPPORT); 108 109 dst = afinfo->dst_lookup(net, tos, saddr, daddr); 110 111 xfrm_policy_put_afinfo(afinfo); 112 113 return dst; 114 } 115 116 static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos, 117 xfrm_address_t *prev_saddr, 118 xfrm_address_t *prev_daddr, 119 int family) 120 { 121 struct net *net = xs_net(x); 122 xfrm_address_t *saddr = &x->props.saddr; 123 xfrm_address_t *daddr = &x->id.daddr; 124 struct dst_entry *dst; 125 126 if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) { 127 saddr = x->coaddr; 128 daddr = prev_daddr; 129 } 130 if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) { 131 saddr = prev_saddr; 132 daddr = x->coaddr; 133 } 134 135 dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family); 136 137 if (!IS_ERR(dst)) { 138 if (prev_saddr != saddr) 139 memcpy(prev_saddr, saddr, sizeof(*prev_saddr)); 140 if (prev_daddr != daddr) 141 memcpy(prev_daddr, daddr, sizeof(*prev_daddr)); 142 } 143 144 return dst; 145 } 146 147 static inline unsigned long make_jiffies(long secs) 148 { 149 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ) 150 return MAX_SCHEDULE_TIMEOUT-1; 151 else 152 return secs*HZ; 153 } 154 155 static void xfrm_policy_timer(unsigned long data) 156 { 157 struct xfrm_policy *xp = (struct xfrm_policy*)data; 158 unsigned long now = get_seconds(); 159 long next = LONG_MAX; 160 int warn = 0; 161 int dir; 162 163 read_lock(&xp->lock); 164 165 if (unlikely(xp->walk.dead)) 166 goto out; 167 168 dir = xfrm_policy_id2dir(xp->index); 169 170 if (xp->lft.hard_add_expires_seconds) { 171 long tmo = xp->lft.hard_add_expires_seconds + 172 xp->curlft.add_time - now; 173 if (tmo <= 0) 174 goto expired; 175 if (tmo < next) 176 next = tmo; 177 } 178 if (xp->lft.hard_use_expires_seconds) { 179 long tmo = xp->lft.hard_use_expires_seconds + 180 (xp->curlft.use_time ? : xp->curlft.add_time) - now; 181 if (tmo <= 0) 182 goto expired; 183 if (tmo < next) 184 next = tmo; 185 } 186 if (xp->lft.soft_add_expires_seconds) { 187 long tmo = xp->lft.soft_add_expires_seconds + 188 xp->curlft.add_time - now; 189 if (tmo <= 0) { 190 warn = 1; 191 tmo = XFRM_KM_TIMEOUT; 192 } 193 if (tmo < next) 194 next = tmo; 195 } 196 if (xp->lft.soft_use_expires_seconds) { 197 long tmo = xp->lft.soft_use_expires_seconds + 198 (xp->curlft.use_time ? : xp->curlft.add_time) - now; 199 if (tmo <= 0) { 200 warn = 1; 201 tmo = XFRM_KM_TIMEOUT; 202 } 203 if (tmo < next) 204 next = tmo; 205 } 206 207 if (warn) 208 km_policy_expired(xp, dir, 0, 0); 209 if (next != LONG_MAX && 210 !mod_timer(&xp->timer, jiffies + make_jiffies(next))) 211 xfrm_pol_hold(xp); 212 213 out: 214 read_unlock(&xp->lock); 215 xfrm_pol_put(xp); 216 return; 217 218 expired: 219 read_unlock(&xp->lock); 220 if (!xfrm_policy_delete(xp, dir)) 221 km_policy_expired(xp, dir, 1, 0); 222 xfrm_pol_put(xp); 223 } 224 225 static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo) 226 { 227 struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo); 228 229 if (unlikely(pol->walk.dead)) 230 flo = NULL; 231 else 232 xfrm_pol_hold(pol); 233 234 return flo; 235 } 236 237 static int xfrm_policy_flo_check(struct flow_cache_object *flo) 238 { 239 struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo); 240 241 return !pol->walk.dead; 242 } 243 244 static void xfrm_policy_flo_delete(struct flow_cache_object *flo) 245 { 246 xfrm_pol_put(container_of(flo, struct xfrm_policy, flo)); 247 } 248 249 static const struct flow_cache_ops xfrm_policy_fc_ops = { 250 .get = xfrm_policy_flo_get, 251 .check = xfrm_policy_flo_check, 252 .delete = xfrm_policy_flo_delete, 253 }; 254 255 /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2 256 * SPD calls. 257 */ 258 259 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp) 260 { 261 struct xfrm_policy *policy; 262 263 policy = kzalloc(sizeof(struct xfrm_policy), gfp); 264 265 if (policy) { 266 write_pnet(&policy->xp_net, net); 267 INIT_LIST_HEAD(&policy->walk.all); 268 INIT_HLIST_NODE(&policy->bydst); 269 INIT_HLIST_NODE(&policy->byidx); 270 rwlock_init(&policy->lock); 271 atomic_set(&policy->refcnt, 1); 272 setup_timer(&policy->timer, xfrm_policy_timer, 273 (unsigned long)policy); 274 policy->flo.ops = &xfrm_policy_fc_ops; 275 } 276 return policy; 277 } 278 EXPORT_SYMBOL(xfrm_policy_alloc); 279 280 /* Destroy xfrm_policy: descendant resources must be released to this moment. */ 281 282 void xfrm_policy_destroy(struct xfrm_policy *policy) 283 { 284 BUG_ON(!policy->walk.dead); 285 286 if (del_timer(&policy->timer)) 287 BUG(); 288 289 security_xfrm_policy_free(policy->security); 290 kfree(policy); 291 } 292 EXPORT_SYMBOL(xfrm_policy_destroy); 293 294 /* Rule must be locked. Release descentant resources, announce 295 * entry dead. The rule must be unlinked from lists to the moment. 296 */ 297 298 static void xfrm_policy_kill(struct xfrm_policy *policy) 299 { 300 policy->walk.dead = 1; 301 302 atomic_inc(&policy->genid); 303 304 if (del_timer(&policy->timer)) 305 xfrm_pol_put(policy); 306 307 xfrm_pol_put(policy); 308 } 309 310 static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024; 311 312 static inline unsigned int idx_hash(struct net *net, u32 index) 313 { 314 return __idx_hash(index, net->xfrm.policy_idx_hmask); 315 } 316 317 static struct hlist_head *policy_hash_bysel(struct net *net, 318 const struct xfrm_selector *sel, 319 unsigned short family, int dir) 320 { 321 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; 322 unsigned int hash = __sel_hash(sel, family, hmask); 323 324 return (hash == hmask + 1 ? 325 &net->xfrm.policy_inexact[dir] : 326 net->xfrm.policy_bydst[dir].table + hash); 327 } 328 329 static struct hlist_head *policy_hash_direct(struct net *net, 330 const xfrm_address_t *daddr, 331 const xfrm_address_t *saddr, 332 unsigned short family, int dir) 333 { 334 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; 335 unsigned int hash = __addr_hash(daddr, saddr, family, hmask); 336 337 return net->xfrm.policy_bydst[dir].table + hash; 338 } 339 340 static void xfrm_dst_hash_transfer(struct hlist_head *list, 341 struct hlist_head *ndsttable, 342 unsigned int nhashmask) 343 { 344 struct hlist_node *entry, *tmp, *entry0 = NULL; 345 struct xfrm_policy *pol; 346 unsigned int h0 = 0; 347 348 redo: 349 hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) { 350 unsigned int h; 351 352 h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr, 353 pol->family, nhashmask); 354 if (!entry0) { 355 hlist_del(entry); 356 hlist_add_head(&pol->bydst, ndsttable+h); 357 h0 = h; 358 } else { 359 if (h != h0) 360 continue; 361 hlist_del(entry); 362 hlist_add_after(entry0, &pol->bydst); 363 } 364 entry0 = entry; 365 } 366 if (!hlist_empty(list)) { 367 entry0 = NULL; 368 goto redo; 369 } 370 } 371 372 static void xfrm_idx_hash_transfer(struct hlist_head *list, 373 struct hlist_head *nidxtable, 374 unsigned int nhashmask) 375 { 376 struct hlist_node *entry, *tmp; 377 struct xfrm_policy *pol; 378 379 hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) { 380 unsigned int h; 381 382 h = __idx_hash(pol->index, nhashmask); 383 hlist_add_head(&pol->byidx, nidxtable+h); 384 } 385 } 386 387 static unsigned long xfrm_new_hash_mask(unsigned int old_hmask) 388 { 389 return ((old_hmask + 1) << 1) - 1; 390 } 391 392 static void xfrm_bydst_resize(struct net *net, int dir) 393 { 394 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; 395 unsigned int nhashmask = xfrm_new_hash_mask(hmask); 396 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head); 397 struct hlist_head *odst = net->xfrm.policy_bydst[dir].table; 398 struct hlist_head *ndst = xfrm_hash_alloc(nsize); 399 int i; 400 401 if (!ndst) 402 return; 403 404 write_lock_bh(&xfrm_policy_lock); 405 406 for (i = hmask; i >= 0; i--) 407 xfrm_dst_hash_transfer(odst + i, ndst, nhashmask); 408 409 net->xfrm.policy_bydst[dir].table = ndst; 410 net->xfrm.policy_bydst[dir].hmask = nhashmask; 411 412 write_unlock_bh(&xfrm_policy_lock); 413 414 xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head)); 415 } 416 417 static void xfrm_byidx_resize(struct net *net, int total) 418 { 419 unsigned int hmask = net->xfrm.policy_idx_hmask; 420 unsigned int nhashmask = xfrm_new_hash_mask(hmask); 421 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head); 422 struct hlist_head *oidx = net->xfrm.policy_byidx; 423 struct hlist_head *nidx = xfrm_hash_alloc(nsize); 424 int i; 425 426 if (!nidx) 427 return; 428 429 write_lock_bh(&xfrm_policy_lock); 430 431 for (i = hmask; i >= 0; i--) 432 xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask); 433 434 net->xfrm.policy_byidx = nidx; 435 net->xfrm.policy_idx_hmask = nhashmask; 436 437 write_unlock_bh(&xfrm_policy_lock); 438 439 xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head)); 440 } 441 442 static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total) 443 { 444 unsigned int cnt = net->xfrm.policy_count[dir]; 445 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; 446 447 if (total) 448 *total += cnt; 449 450 if ((hmask + 1) < xfrm_policy_hashmax && 451 cnt > hmask) 452 return 1; 453 454 return 0; 455 } 456 457 static inline int xfrm_byidx_should_resize(struct net *net, int total) 458 { 459 unsigned int hmask = net->xfrm.policy_idx_hmask; 460 461 if ((hmask + 1) < xfrm_policy_hashmax && 462 total > hmask) 463 return 1; 464 465 return 0; 466 } 467 468 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si) 469 { 470 read_lock_bh(&xfrm_policy_lock); 471 si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN]; 472 si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT]; 473 si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD]; 474 si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX]; 475 si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX]; 476 si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX]; 477 si->spdhcnt = net->xfrm.policy_idx_hmask; 478 si->spdhmcnt = xfrm_policy_hashmax; 479 read_unlock_bh(&xfrm_policy_lock); 480 } 481 EXPORT_SYMBOL(xfrm_spd_getinfo); 482 483 static DEFINE_MUTEX(hash_resize_mutex); 484 static void xfrm_hash_resize(struct work_struct *work) 485 { 486 struct net *net = container_of(work, struct net, xfrm.policy_hash_work); 487 int dir, total; 488 489 mutex_lock(&hash_resize_mutex); 490 491 total = 0; 492 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) { 493 if (xfrm_bydst_should_resize(net, dir, &total)) 494 xfrm_bydst_resize(net, dir); 495 } 496 if (xfrm_byidx_should_resize(net, total)) 497 xfrm_byidx_resize(net, total); 498 499 mutex_unlock(&hash_resize_mutex); 500 } 501 502 /* Generate new index... KAME seems to generate them ordered by cost 503 * of an absolute inpredictability of ordering of rules. This will not pass. */ 504 static u32 xfrm_gen_index(struct net *net, int dir) 505 { 506 static u32 idx_generator; 507 508 for (;;) { 509 struct hlist_node *entry; 510 struct hlist_head *list; 511 struct xfrm_policy *p; 512 u32 idx; 513 int found; 514 515 idx = (idx_generator | dir); 516 idx_generator += 8; 517 if (idx == 0) 518 idx = 8; 519 list = net->xfrm.policy_byidx + idx_hash(net, idx); 520 found = 0; 521 hlist_for_each_entry(p, entry, list, byidx) { 522 if (p->index == idx) { 523 found = 1; 524 break; 525 } 526 } 527 if (!found) 528 return idx; 529 } 530 } 531 532 static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2) 533 { 534 u32 *p1 = (u32 *) s1; 535 u32 *p2 = (u32 *) s2; 536 int len = sizeof(struct xfrm_selector) / sizeof(u32); 537 int i; 538 539 for (i = 0; i < len; i++) { 540 if (p1[i] != p2[i]) 541 return 1; 542 } 543 544 return 0; 545 } 546 547 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) 548 { 549 struct net *net = xp_net(policy); 550 struct xfrm_policy *pol; 551 struct xfrm_policy *delpol; 552 struct hlist_head *chain; 553 struct hlist_node *entry, *newpos; 554 u32 mark = policy->mark.v & policy->mark.m; 555 556 write_lock_bh(&xfrm_policy_lock); 557 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir); 558 delpol = NULL; 559 newpos = NULL; 560 hlist_for_each_entry(pol, entry, chain, bydst) { 561 if (pol->type == policy->type && 562 !selector_cmp(&pol->selector, &policy->selector) && 563 (mark & pol->mark.m) == pol->mark.v && 564 xfrm_sec_ctx_match(pol->security, policy->security) && 565 !WARN_ON(delpol)) { 566 if (excl) { 567 write_unlock_bh(&xfrm_policy_lock); 568 return -EEXIST; 569 } 570 delpol = pol; 571 if (policy->priority > pol->priority) 572 continue; 573 } else if (policy->priority >= pol->priority) { 574 newpos = &pol->bydst; 575 continue; 576 } 577 if (delpol) 578 break; 579 } 580 if (newpos) 581 hlist_add_after(newpos, &policy->bydst); 582 else 583 hlist_add_head(&policy->bydst, chain); 584 xfrm_pol_hold(policy); 585 net->xfrm.policy_count[dir]++; 586 atomic_inc(&flow_cache_genid); 587 if (delpol) 588 __xfrm_policy_unlink(delpol, dir); 589 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir); 590 hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index)); 591 policy->curlft.add_time = get_seconds(); 592 policy->curlft.use_time = 0; 593 if (!mod_timer(&policy->timer, jiffies + HZ)) 594 xfrm_pol_hold(policy); 595 list_add(&policy->walk.all, &net->xfrm.policy_all); 596 write_unlock_bh(&xfrm_policy_lock); 597 598 if (delpol) 599 xfrm_policy_kill(delpol); 600 else if (xfrm_bydst_should_resize(net, dir, NULL)) 601 schedule_work(&net->xfrm.policy_hash_work); 602 603 return 0; 604 } 605 EXPORT_SYMBOL(xfrm_policy_insert); 606 607 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type, 608 int dir, struct xfrm_selector *sel, 609 struct xfrm_sec_ctx *ctx, int delete, 610 int *err) 611 { 612 struct xfrm_policy *pol, *ret; 613 struct hlist_head *chain; 614 struct hlist_node *entry; 615 616 *err = 0; 617 write_lock_bh(&xfrm_policy_lock); 618 chain = policy_hash_bysel(net, sel, sel->family, dir); 619 ret = NULL; 620 hlist_for_each_entry(pol, entry, chain, bydst) { 621 if (pol->type == type && 622 (mark & pol->mark.m) == pol->mark.v && 623 !selector_cmp(sel, &pol->selector) && 624 xfrm_sec_ctx_match(ctx, pol->security)) { 625 xfrm_pol_hold(pol); 626 if (delete) { 627 *err = security_xfrm_policy_delete( 628 pol->security); 629 if (*err) { 630 write_unlock_bh(&xfrm_policy_lock); 631 return pol; 632 } 633 __xfrm_policy_unlink(pol, dir); 634 } 635 ret = pol; 636 break; 637 } 638 } 639 write_unlock_bh(&xfrm_policy_lock); 640 641 if (ret && delete) 642 xfrm_policy_kill(ret); 643 return ret; 644 } 645 EXPORT_SYMBOL(xfrm_policy_bysel_ctx); 646 647 struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type, 648 int dir, u32 id, int delete, int *err) 649 { 650 struct xfrm_policy *pol, *ret; 651 struct hlist_head *chain; 652 struct hlist_node *entry; 653 654 *err = -ENOENT; 655 if (xfrm_policy_id2dir(id) != dir) 656 return NULL; 657 658 *err = 0; 659 write_lock_bh(&xfrm_policy_lock); 660 chain = net->xfrm.policy_byidx + idx_hash(net, id); 661 ret = NULL; 662 hlist_for_each_entry(pol, entry, chain, byidx) { 663 if (pol->type == type && pol->index == id && 664 (mark & pol->mark.m) == pol->mark.v) { 665 xfrm_pol_hold(pol); 666 if (delete) { 667 *err = security_xfrm_policy_delete( 668 pol->security); 669 if (*err) { 670 write_unlock_bh(&xfrm_policy_lock); 671 return pol; 672 } 673 __xfrm_policy_unlink(pol, dir); 674 } 675 ret = pol; 676 break; 677 } 678 } 679 write_unlock_bh(&xfrm_policy_lock); 680 681 if (ret && delete) 682 xfrm_policy_kill(ret); 683 return ret; 684 } 685 EXPORT_SYMBOL(xfrm_policy_byid); 686 687 #ifdef CONFIG_SECURITY_NETWORK_XFRM 688 static inline int 689 xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info) 690 { 691 int dir, err = 0; 692 693 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { 694 struct xfrm_policy *pol; 695 struct hlist_node *entry; 696 int i; 697 698 hlist_for_each_entry(pol, entry, 699 &net->xfrm.policy_inexact[dir], bydst) { 700 if (pol->type != type) 701 continue; 702 err = security_xfrm_policy_delete(pol->security); 703 if (err) { 704 xfrm_audit_policy_delete(pol, 0, 705 audit_info->loginuid, 706 audit_info->sessionid, 707 audit_info->secid); 708 return err; 709 } 710 } 711 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) { 712 hlist_for_each_entry(pol, entry, 713 net->xfrm.policy_bydst[dir].table + i, 714 bydst) { 715 if (pol->type != type) 716 continue; 717 err = security_xfrm_policy_delete( 718 pol->security); 719 if (err) { 720 xfrm_audit_policy_delete(pol, 0, 721 audit_info->loginuid, 722 audit_info->sessionid, 723 audit_info->secid); 724 return err; 725 } 726 } 727 } 728 } 729 return err; 730 } 731 #else 732 static inline int 733 xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info) 734 { 735 return 0; 736 } 737 #endif 738 739 int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) 740 { 741 int dir, err = 0, cnt = 0; 742 743 write_lock_bh(&xfrm_policy_lock); 744 745 err = xfrm_policy_flush_secctx_check(net, type, audit_info); 746 if (err) 747 goto out; 748 749 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { 750 struct xfrm_policy *pol; 751 struct hlist_node *entry; 752 int i; 753 754 again1: 755 hlist_for_each_entry(pol, entry, 756 &net->xfrm.policy_inexact[dir], bydst) { 757 if (pol->type != type) 758 continue; 759 __xfrm_policy_unlink(pol, dir); 760 write_unlock_bh(&xfrm_policy_lock); 761 cnt++; 762 763 xfrm_audit_policy_delete(pol, 1, audit_info->loginuid, 764 audit_info->sessionid, 765 audit_info->secid); 766 767 xfrm_policy_kill(pol); 768 769 write_lock_bh(&xfrm_policy_lock); 770 goto again1; 771 } 772 773 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) { 774 again2: 775 hlist_for_each_entry(pol, entry, 776 net->xfrm.policy_bydst[dir].table + i, 777 bydst) { 778 if (pol->type != type) 779 continue; 780 __xfrm_policy_unlink(pol, dir); 781 write_unlock_bh(&xfrm_policy_lock); 782 cnt++; 783 784 xfrm_audit_policy_delete(pol, 1, 785 audit_info->loginuid, 786 audit_info->sessionid, 787 audit_info->secid); 788 xfrm_policy_kill(pol); 789 790 write_lock_bh(&xfrm_policy_lock); 791 goto again2; 792 } 793 } 794 795 } 796 if (!cnt) 797 err = -ESRCH; 798 out: 799 write_unlock_bh(&xfrm_policy_lock); 800 return err; 801 } 802 EXPORT_SYMBOL(xfrm_policy_flush); 803 804 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk, 805 int (*func)(struct xfrm_policy *, int, int, void*), 806 void *data) 807 { 808 struct xfrm_policy *pol; 809 struct xfrm_policy_walk_entry *x; 810 int error = 0; 811 812 if (walk->type >= XFRM_POLICY_TYPE_MAX && 813 walk->type != XFRM_POLICY_TYPE_ANY) 814 return -EINVAL; 815 816 if (list_empty(&walk->walk.all) && walk->seq != 0) 817 return 0; 818 819 write_lock_bh(&xfrm_policy_lock); 820 if (list_empty(&walk->walk.all)) 821 x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all); 822 else 823 x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all); 824 list_for_each_entry_from(x, &net->xfrm.policy_all, all) { 825 if (x->dead) 826 continue; 827 pol = container_of(x, struct xfrm_policy, walk); 828 if (walk->type != XFRM_POLICY_TYPE_ANY && 829 walk->type != pol->type) 830 continue; 831 error = func(pol, xfrm_policy_id2dir(pol->index), 832 walk->seq, data); 833 if (error) { 834 list_move_tail(&walk->walk.all, &x->all); 835 goto out; 836 } 837 walk->seq++; 838 } 839 if (walk->seq == 0) { 840 error = -ENOENT; 841 goto out; 842 } 843 list_del_init(&walk->walk.all); 844 out: 845 write_unlock_bh(&xfrm_policy_lock); 846 return error; 847 } 848 EXPORT_SYMBOL(xfrm_policy_walk); 849 850 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type) 851 { 852 INIT_LIST_HEAD(&walk->walk.all); 853 walk->walk.dead = 1; 854 walk->type = type; 855 walk->seq = 0; 856 } 857 EXPORT_SYMBOL(xfrm_policy_walk_init); 858 859 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk) 860 { 861 if (list_empty(&walk->walk.all)) 862 return; 863 864 write_lock_bh(&xfrm_policy_lock); 865 list_del(&walk->walk.all); 866 write_unlock_bh(&xfrm_policy_lock); 867 } 868 EXPORT_SYMBOL(xfrm_policy_walk_done); 869 870 /* 871 * Find policy to apply to this flow. 872 * 873 * Returns 0 if policy found, else an -errno. 874 */ 875 static int xfrm_policy_match(const struct xfrm_policy *pol, 876 const struct flowi *fl, 877 u8 type, u16 family, int dir) 878 { 879 const struct xfrm_selector *sel = &pol->selector; 880 int ret = -ESRCH; 881 bool match; 882 883 if (pol->family != family || 884 (fl->flowi_mark & pol->mark.m) != pol->mark.v || 885 pol->type != type) 886 return ret; 887 888 match = xfrm_selector_match(sel, fl, family); 889 if (match) 890 ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid, 891 dir); 892 893 return ret; 894 } 895 896 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type, 897 const struct flowi *fl, 898 u16 family, u8 dir) 899 { 900 int err; 901 struct xfrm_policy *pol, *ret; 902 const xfrm_address_t *daddr, *saddr; 903 struct hlist_node *entry; 904 struct hlist_head *chain; 905 u32 priority = ~0U; 906 907 daddr = xfrm_flowi_daddr(fl, family); 908 saddr = xfrm_flowi_saddr(fl, family); 909 if (unlikely(!daddr || !saddr)) 910 return NULL; 911 912 read_lock_bh(&xfrm_policy_lock); 913 chain = policy_hash_direct(net, daddr, saddr, family, dir); 914 ret = NULL; 915 hlist_for_each_entry(pol, entry, chain, bydst) { 916 err = xfrm_policy_match(pol, fl, type, family, dir); 917 if (err) { 918 if (err == -ESRCH) 919 continue; 920 else { 921 ret = ERR_PTR(err); 922 goto fail; 923 } 924 } else { 925 ret = pol; 926 priority = ret->priority; 927 break; 928 } 929 } 930 chain = &net->xfrm.policy_inexact[dir]; 931 hlist_for_each_entry(pol, entry, chain, bydst) { 932 err = xfrm_policy_match(pol, fl, type, family, dir); 933 if (err) { 934 if (err == -ESRCH) 935 continue; 936 else { 937 ret = ERR_PTR(err); 938 goto fail; 939 } 940 } else if (pol->priority < priority) { 941 ret = pol; 942 break; 943 } 944 } 945 if (ret) 946 xfrm_pol_hold(ret); 947 fail: 948 read_unlock_bh(&xfrm_policy_lock); 949 950 return ret; 951 } 952 953 static struct xfrm_policy * 954 __xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir) 955 { 956 #ifdef CONFIG_XFRM_SUB_POLICY 957 struct xfrm_policy *pol; 958 959 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir); 960 if (pol != NULL) 961 return pol; 962 #endif 963 return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir); 964 } 965 966 static struct flow_cache_object * 967 xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, 968 u8 dir, struct flow_cache_object *old_obj, void *ctx) 969 { 970 struct xfrm_policy *pol; 971 972 if (old_obj) 973 xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo)); 974 975 pol = __xfrm_policy_lookup(net, fl, family, dir); 976 if (IS_ERR_OR_NULL(pol)) 977 return ERR_CAST(pol); 978 979 /* Resolver returns two references: 980 * one for cache and one for caller of flow_cache_lookup() */ 981 xfrm_pol_hold(pol); 982 983 return &pol->flo; 984 } 985 986 static inline int policy_to_flow_dir(int dir) 987 { 988 if (XFRM_POLICY_IN == FLOW_DIR_IN && 989 XFRM_POLICY_OUT == FLOW_DIR_OUT && 990 XFRM_POLICY_FWD == FLOW_DIR_FWD) 991 return dir; 992 switch (dir) { 993 default: 994 case XFRM_POLICY_IN: 995 return FLOW_DIR_IN; 996 case XFRM_POLICY_OUT: 997 return FLOW_DIR_OUT; 998 case XFRM_POLICY_FWD: 999 return FLOW_DIR_FWD; 1000 } 1001 } 1002 1003 static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, 1004 const struct flowi *fl) 1005 { 1006 struct xfrm_policy *pol; 1007 1008 read_lock_bh(&xfrm_policy_lock); 1009 if ((pol = sk->sk_policy[dir]) != NULL) { 1010 bool match = xfrm_selector_match(&pol->selector, fl, 1011 sk->sk_family); 1012 int err = 0; 1013 1014 if (match) { 1015 if ((sk->sk_mark & pol->mark.m) != pol->mark.v) { 1016 pol = NULL; 1017 goto out; 1018 } 1019 err = security_xfrm_policy_lookup(pol->security, 1020 fl->flowi_secid, 1021 policy_to_flow_dir(dir)); 1022 if (!err) 1023 xfrm_pol_hold(pol); 1024 else if (err == -ESRCH) 1025 pol = NULL; 1026 else 1027 pol = ERR_PTR(err); 1028 } else 1029 pol = NULL; 1030 } 1031 out: 1032 read_unlock_bh(&xfrm_policy_lock); 1033 return pol; 1034 } 1035 1036 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir) 1037 { 1038 struct net *net = xp_net(pol); 1039 struct hlist_head *chain = policy_hash_bysel(net, &pol->selector, 1040 pol->family, dir); 1041 1042 list_add(&pol->walk.all, &net->xfrm.policy_all); 1043 hlist_add_head(&pol->bydst, chain); 1044 hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index)); 1045 net->xfrm.policy_count[dir]++; 1046 xfrm_pol_hold(pol); 1047 1048 if (xfrm_bydst_should_resize(net, dir, NULL)) 1049 schedule_work(&net->xfrm.policy_hash_work); 1050 } 1051 1052 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, 1053 int dir) 1054 { 1055 struct net *net = xp_net(pol); 1056 1057 if (hlist_unhashed(&pol->bydst)) 1058 return NULL; 1059 1060 hlist_del(&pol->bydst); 1061 hlist_del(&pol->byidx); 1062 list_del(&pol->walk.all); 1063 net->xfrm.policy_count[dir]--; 1064 1065 return pol; 1066 } 1067 1068 int xfrm_policy_delete(struct xfrm_policy *pol, int dir) 1069 { 1070 write_lock_bh(&xfrm_policy_lock); 1071 pol = __xfrm_policy_unlink(pol, dir); 1072 write_unlock_bh(&xfrm_policy_lock); 1073 if (pol) { 1074 xfrm_policy_kill(pol); 1075 return 0; 1076 } 1077 return -ENOENT; 1078 } 1079 EXPORT_SYMBOL(xfrm_policy_delete); 1080 1081 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol) 1082 { 1083 struct net *net = xp_net(pol); 1084 struct xfrm_policy *old_pol; 1085 1086 #ifdef CONFIG_XFRM_SUB_POLICY 1087 if (pol && pol->type != XFRM_POLICY_TYPE_MAIN) 1088 return -EINVAL; 1089 #endif 1090 1091 write_lock_bh(&xfrm_policy_lock); 1092 old_pol = sk->sk_policy[dir]; 1093 sk->sk_policy[dir] = pol; 1094 if (pol) { 1095 pol->curlft.add_time = get_seconds(); 1096 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir); 1097 __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir); 1098 } 1099 if (old_pol) 1100 /* Unlinking succeeds always. This is the only function 1101 * allowed to delete or replace socket policy. 1102 */ 1103 __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir); 1104 write_unlock_bh(&xfrm_policy_lock); 1105 1106 if (old_pol) { 1107 xfrm_policy_kill(old_pol); 1108 } 1109 return 0; 1110 } 1111 1112 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir) 1113 { 1114 struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC); 1115 1116 if (newp) { 1117 newp->selector = old->selector; 1118 if (security_xfrm_policy_clone(old->security, 1119 &newp->security)) { 1120 kfree(newp); 1121 return NULL; /* ENOMEM */ 1122 } 1123 newp->lft = old->lft; 1124 newp->curlft = old->curlft; 1125 newp->mark = old->mark; 1126 newp->action = old->action; 1127 newp->flags = old->flags; 1128 newp->xfrm_nr = old->xfrm_nr; 1129 newp->index = old->index; 1130 newp->type = old->type; 1131 memcpy(newp->xfrm_vec, old->xfrm_vec, 1132 newp->xfrm_nr*sizeof(struct xfrm_tmpl)); 1133 write_lock_bh(&xfrm_policy_lock); 1134 __xfrm_policy_link(newp, XFRM_POLICY_MAX+dir); 1135 write_unlock_bh(&xfrm_policy_lock); 1136 xfrm_pol_put(newp); 1137 } 1138 return newp; 1139 } 1140 1141 int __xfrm_sk_clone_policy(struct sock *sk) 1142 { 1143 struct xfrm_policy *p0 = sk->sk_policy[0], 1144 *p1 = sk->sk_policy[1]; 1145 1146 sk->sk_policy[0] = sk->sk_policy[1] = NULL; 1147 if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL) 1148 return -ENOMEM; 1149 if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL) 1150 return -ENOMEM; 1151 return 0; 1152 } 1153 1154 static int 1155 xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote, 1156 unsigned short family) 1157 { 1158 int err; 1159 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1160 1161 if (unlikely(afinfo == NULL)) 1162 return -EINVAL; 1163 err = afinfo->get_saddr(net, local, remote); 1164 xfrm_policy_put_afinfo(afinfo); 1165 return err; 1166 } 1167 1168 /* Resolve list of templates for the flow, given policy. */ 1169 1170 static int 1171 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl, 1172 struct xfrm_state **xfrm, unsigned short family) 1173 { 1174 struct net *net = xp_net(policy); 1175 int nx; 1176 int i, error; 1177 xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family); 1178 xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family); 1179 xfrm_address_t tmp; 1180 1181 for (nx=0, i = 0; i < policy->xfrm_nr; i++) { 1182 struct xfrm_state *x; 1183 xfrm_address_t *remote = daddr; 1184 xfrm_address_t *local = saddr; 1185 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i]; 1186 1187 if (tmpl->mode == XFRM_MODE_TUNNEL || 1188 tmpl->mode == XFRM_MODE_BEET) { 1189 remote = &tmpl->id.daddr; 1190 local = &tmpl->saddr; 1191 if (xfrm_addr_any(local, tmpl->encap_family)) { 1192 error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family); 1193 if (error) 1194 goto fail; 1195 local = &tmp; 1196 } 1197 } 1198 1199 x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family); 1200 1201 if (x && x->km.state == XFRM_STATE_VALID) { 1202 xfrm[nx++] = x; 1203 daddr = remote; 1204 saddr = local; 1205 continue; 1206 } 1207 if (x) { 1208 error = (x->km.state == XFRM_STATE_ERROR ? 1209 -EINVAL : -EAGAIN); 1210 xfrm_state_put(x); 1211 } 1212 else if (error == -ESRCH) 1213 error = -EAGAIN; 1214 1215 if (!tmpl->optional) 1216 goto fail; 1217 } 1218 return nx; 1219 1220 fail: 1221 for (nx--; nx>=0; nx--) 1222 xfrm_state_put(xfrm[nx]); 1223 return error; 1224 } 1225 1226 static int 1227 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl, 1228 struct xfrm_state **xfrm, unsigned short family) 1229 { 1230 struct xfrm_state *tp[XFRM_MAX_DEPTH]; 1231 struct xfrm_state **tpp = (npols > 1) ? tp : xfrm; 1232 int cnx = 0; 1233 int error; 1234 int ret; 1235 int i; 1236 1237 for (i = 0; i < npols; i++) { 1238 if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) { 1239 error = -ENOBUFS; 1240 goto fail; 1241 } 1242 1243 ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family); 1244 if (ret < 0) { 1245 error = ret; 1246 goto fail; 1247 } else 1248 cnx += ret; 1249 } 1250 1251 /* found states are sorted for outbound processing */ 1252 if (npols > 1) 1253 xfrm_state_sort(xfrm, tpp, cnx, family); 1254 1255 return cnx; 1256 1257 fail: 1258 for (cnx--; cnx>=0; cnx--) 1259 xfrm_state_put(tpp[cnx]); 1260 return error; 1261 1262 } 1263 1264 /* Check that the bundle accepts the flow and its components are 1265 * still valid. 1266 */ 1267 1268 static inline int xfrm_get_tos(const struct flowi *fl, int family) 1269 { 1270 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1271 int tos; 1272 1273 if (!afinfo) 1274 return -EINVAL; 1275 1276 tos = afinfo->get_tos(fl); 1277 1278 xfrm_policy_put_afinfo(afinfo); 1279 1280 return tos; 1281 } 1282 1283 static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo) 1284 { 1285 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo); 1286 struct dst_entry *dst = &xdst->u.dst; 1287 1288 if (xdst->route == NULL) { 1289 /* Dummy bundle - if it has xfrms we were not 1290 * able to build bundle as template resolution failed. 1291 * It means we need to try again resolving. */ 1292 if (xdst->num_xfrms > 0) 1293 return NULL; 1294 } else { 1295 /* Real bundle */ 1296 if (stale_bundle(dst)) 1297 return NULL; 1298 } 1299 1300 dst_hold(dst); 1301 return flo; 1302 } 1303 1304 static int xfrm_bundle_flo_check(struct flow_cache_object *flo) 1305 { 1306 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo); 1307 struct dst_entry *dst = &xdst->u.dst; 1308 1309 if (!xdst->route) 1310 return 0; 1311 if (stale_bundle(dst)) 1312 return 0; 1313 1314 return 1; 1315 } 1316 1317 static void xfrm_bundle_flo_delete(struct flow_cache_object *flo) 1318 { 1319 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo); 1320 struct dst_entry *dst = &xdst->u.dst; 1321 1322 dst_free(dst); 1323 } 1324 1325 static const struct flow_cache_ops xfrm_bundle_fc_ops = { 1326 .get = xfrm_bundle_flo_get, 1327 .check = xfrm_bundle_flo_check, 1328 .delete = xfrm_bundle_flo_delete, 1329 }; 1330 1331 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) 1332 { 1333 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1334 struct dst_ops *dst_ops; 1335 struct xfrm_dst *xdst; 1336 1337 if (!afinfo) 1338 return ERR_PTR(-EINVAL); 1339 1340 switch (family) { 1341 case AF_INET: 1342 dst_ops = &net->xfrm.xfrm4_dst_ops; 1343 break; 1344 #if IS_ENABLED(CONFIG_IPV6) 1345 case AF_INET6: 1346 dst_ops = &net->xfrm.xfrm6_dst_ops; 1347 break; 1348 #endif 1349 default: 1350 BUG(); 1351 } 1352 xdst = dst_alloc(dst_ops, NULL, 0, 0, 0); 1353 1354 if (likely(xdst)) { 1355 memset(&xdst->u.rt6.rt6i_table, 0, 1356 sizeof(*xdst) - sizeof(struct dst_entry)); 1357 xdst->flo.ops = &xfrm_bundle_fc_ops; 1358 } else 1359 xdst = ERR_PTR(-ENOBUFS); 1360 1361 xfrm_policy_put_afinfo(afinfo); 1362 1363 return xdst; 1364 } 1365 1366 static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst, 1367 int nfheader_len) 1368 { 1369 struct xfrm_policy_afinfo *afinfo = 1370 xfrm_policy_get_afinfo(dst->ops->family); 1371 int err; 1372 1373 if (!afinfo) 1374 return -EINVAL; 1375 1376 err = afinfo->init_path(path, dst, nfheader_len); 1377 1378 xfrm_policy_put_afinfo(afinfo); 1379 1380 return err; 1381 } 1382 1383 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, 1384 const struct flowi *fl) 1385 { 1386 struct xfrm_policy_afinfo *afinfo = 1387 xfrm_policy_get_afinfo(xdst->u.dst.ops->family); 1388 int err; 1389 1390 if (!afinfo) 1391 return -EINVAL; 1392 1393 err = afinfo->fill_dst(xdst, dev, fl); 1394 1395 xfrm_policy_put_afinfo(afinfo); 1396 1397 return err; 1398 } 1399 1400 1401 /* Allocate chain of dst_entry's, attach known xfrm's, calculate 1402 * all the metrics... Shortly, bundle a bundle. 1403 */ 1404 1405 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, 1406 struct xfrm_state **xfrm, int nx, 1407 const struct flowi *fl, 1408 struct dst_entry *dst) 1409 { 1410 struct net *net = xp_net(policy); 1411 unsigned long now = jiffies; 1412 struct net_device *dev; 1413 struct xfrm_mode *inner_mode; 1414 struct dst_entry *dst_prev = NULL; 1415 struct dst_entry *dst0 = NULL; 1416 int i = 0; 1417 int err; 1418 int header_len = 0; 1419 int nfheader_len = 0; 1420 int trailer_len = 0; 1421 int tos; 1422 int family = policy->selector.family; 1423 xfrm_address_t saddr, daddr; 1424 1425 xfrm_flowi_addr_get(fl, &saddr, &daddr, family); 1426 1427 tos = xfrm_get_tos(fl, family); 1428 err = tos; 1429 if (tos < 0) 1430 goto put_states; 1431 1432 dst_hold(dst); 1433 1434 for (; i < nx; i++) { 1435 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family); 1436 struct dst_entry *dst1 = &xdst->u.dst; 1437 1438 err = PTR_ERR(xdst); 1439 if (IS_ERR(xdst)) { 1440 dst_release(dst); 1441 goto put_states; 1442 } 1443 1444 if (xfrm[i]->sel.family == AF_UNSPEC) { 1445 inner_mode = xfrm_ip2inner_mode(xfrm[i], 1446 xfrm_af2proto(family)); 1447 if (!inner_mode) { 1448 err = -EAFNOSUPPORT; 1449 dst_release(dst); 1450 goto put_states; 1451 } 1452 } else 1453 inner_mode = xfrm[i]->inner_mode; 1454 1455 if (!dst_prev) 1456 dst0 = dst1; 1457 else { 1458 dst_prev->child = dst_clone(dst1); 1459 dst1->flags |= DST_NOHASH; 1460 } 1461 1462 xdst->route = dst; 1463 dst_copy_metrics(dst1, dst); 1464 1465 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) { 1466 family = xfrm[i]->props.family; 1467 dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr, 1468 family); 1469 err = PTR_ERR(dst); 1470 if (IS_ERR(dst)) 1471 goto put_states; 1472 } else 1473 dst_hold(dst); 1474 1475 dst1->xfrm = xfrm[i]; 1476 xdst->xfrm_genid = xfrm[i]->genid; 1477 1478 dst1->obsolete = -1; 1479 dst1->flags |= DST_HOST; 1480 dst1->lastuse = now; 1481 1482 dst1->input = dst_discard; 1483 dst1->output = inner_mode->afinfo->output; 1484 1485 dst1->next = dst_prev; 1486 dst_prev = dst1; 1487 1488 header_len += xfrm[i]->props.header_len; 1489 if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT) 1490 nfheader_len += xfrm[i]->props.header_len; 1491 trailer_len += xfrm[i]->props.trailer_len; 1492 } 1493 1494 dst_prev->child = dst; 1495 dst0->path = dst; 1496 1497 err = -ENODEV; 1498 dev = dst->dev; 1499 if (!dev) 1500 goto free_dst; 1501 1502 /* Copy neighbour for reachability confirmation */ 1503 dst_set_neighbour(dst0, neigh_clone(dst_get_neighbour_noref(dst))); 1504 1505 xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len); 1506 xfrm_init_pmtu(dst_prev); 1507 1508 for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) { 1509 struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev; 1510 1511 err = xfrm_fill_dst(xdst, dev, fl); 1512 if (err) 1513 goto free_dst; 1514 1515 dst_prev->header_len = header_len; 1516 dst_prev->trailer_len = trailer_len; 1517 header_len -= xdst->u.dst.xfrm->props.header_len; 1518 trailer_len -= xdst->u.dst.xfrm->props.trailer_len; 1519 } 1520 1521 out: 1522 return dst0; 1523 1524 put_states: 1525 for (; i < nx; i++) 1526 xfrm_state_put(xfrm[i]); 1527 free_dst: 1528 if (dst0) 1529 dst_free(dst0); 1530 dst0 = ERR_PTR(err); 1531 goto out; 1532 } 1533 1534 static int inline 1535 xfrm_dst_alloc_copy(void **target, const void *src, int size) 1536 { 1537 if (!*target) { 1538 *target = kmalloc(size, GFP_ATOMIC); 1539 if (!*target) 1540 return -ENOMEM; 1541 } 1542 memcpy(*target, src, size); 1543 return 0; 1544 } 1545 1546 static int inline 1547 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel) 1548 { 1549 #ifdef CONFIG_XFRM_SUB_POLICY 1550 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 1551 return xfrm_dst_alloc_copy((void **)&(xdst->partner), 1552 sel, sizeof(*sel)); 1553 #else 1554 return 0; 1555 #endif 1556 } 1557 1558 static int inline 1559 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl) 1560 { 1561 #ifdef CONFIG_XFRM_SUB_POLICY 1562 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 1563 return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl)); 1564 #else 1565 return 0; 1566 #endif 1567 } 1568 1569 static int xfrm_expand_policies(const struct flowi *fl, u16 family, 1570 struct xfrm_policy **pols, 1571 int *num_pols, int *num_xfrms) 1572 { 1573 int i; 1574 1575 if (*num_pols == 0 || !pols[0]) { 1576 *num_pols = 0; 1577 *num_xfrms = 0; 1578 return 0; 1579 } 1580 if (IS_ERR(pols[0])) 1581 return PTR_ERR(pols[0]); 1582 1583 *num_xfrms = pols[0]->xfrm_nr; 1584 1585 #ifdef CONFIG_XFRM_SUB_POLICY 1586 if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW && 1587 pols[0]->type != XFRM_POLICY_TYPE_MAIN) { 1588 pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]), 1589 XFRM_POLICY_TYPE_MAIN, 1590 fl, family, 1591 XFRM_POLICY_OUT); 1592 if (pols[1]) { 1593 if (IS_ERR(pols[1])) { 1594 xfrm_pols_put(pols, *num_pols); 1595 return PTR_ERR(pols[1]); 1596 } 1597 (*num_pols) ++; 1598 (*num_xfrms) += pols[1]->xfrm_nr; 1599 } 1600 } 1601 #endif 1602 for (i = 0; i < *num_pols; i++) { 1603 if (pols[i]->action != XFRM_POLICY_ALLOW) { 1604 *num_xfrms = -1; 1605 break; 1606 } 1607 } 1608 1609 return 0; 1610 1611 } 1612 1613 static struct xfrm_dst * 1614 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, 1615 const struct flowi *fl, u16 family, 1616 struct dst_entry *dst_orig) 1617 { 1618 struct net *net = xp_net(pols[0]); 1619 struct xfrm_state *xfrm[XFRM_MAX_DEPTH]; 1620 struct dst_entry *dst; 1621 struct xfrm_dst *xdst; 1622 int err; 1623 1624 /* Try to instantiate a bundle */ 1625 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family); 1626 if (err <= 0) { 1627 if (err != 0 && err != -EAGAIN) 1628 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); 1629 return ERR_PTR(err); 1630 } 1631 1632 dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig); 1633 if (IS_ERR(dst)) { 1634 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR); 1635 return ERR_CAST(dst); 1636 } 1637 1638 xdst = (struct xfrm_dst *)dst; 1639 xdst->num_xfrms = err; 1640 if (num_pols > 1) 1641 err = xfrm_dst_update_parent(dst, &pols[1]->selector); 1642 else 1643 err = xfrm_dst_update_origin(dst, fl); 1644 if (unlikely(err)) { 1645 dst_free(dst); 1646 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR); 1647 return ERR_PTR(err); 1648 } 1649 1650 xdst->num_pols = num_pols; 1651 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols); 1652 xdst->policy_genid = atomic_read(&pols[0]->genid); 1653 1654 return xdst; 1655 } 1656 1657 static struct flow_cache_object * 1658 xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, 1659 struct flow_cache_object *oldflo, void *ctx) 1660 { 1661 struct dst_entry *dst_orig = (struct dst_entry *)ctx; 1662 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 1663 struct xfrm_dst *xdst, *new_xdst; 1664 int num_pols = 0, num_xfrms = 0, i, err, pol_dead; 1665 1666 /* Check if the policies from old bundle are usable */ 1667 xdst = NULL; 1668 if (oldflo) { 1669 xdst = container_of(oldflo, struct xfrm_dst, flo); 1670 num_pols = xdst->num_pols; 1671 num_xfrms = xdst->num_xfrms; 1672 pol_dead = 0; 1673 for (i = 0; i < num_pols; i++) { 1674 pols[i] = xdst->pols[i]; 1675 pol_dead |= pols[i]->walk.dead; 1676 } 1677 if (pol_dead) { 1678 dst_free(&xdst->u.dst); 1679 xdst = NULL; 1680 num_pols = 0; 1681 num_xfrms = 0; 1682 oldflo = NULL; 1683 } 1684 } 1685 1686 /* Resolve policies to use if we couldn't get them from 1687 * previous cache entry */ 1688 if (xdst == NULL) { 1689 num_pols = 1; 1690 pols[0] = __xfrm_policy_lookup(net, fl, family, dir); 1691 err = xfrm_expand_policies(fl, family, pols, 1692 &num_pols, &num_xfrms); 1693 if (err < 0) 1694 goto inc_error; 1695 if (num_pols == 0) 1696 return NULL; 1697 if (num_xfrms <= 0) 1698 goto make_dummy_bundle; 1699 } 1700 1701 new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig); 1702 if (IS_ERR(new_xdst)) { 1703 err = PTR_ERR(new_xdst); 1704 if (err != -EAGAIN) 1705 goto error; 1706 if (oldflo == NULL) 1707 goto make_dummy_bundle; 1708 dst_hold(&xdst->u.dst); 1709 return oldflo; 1710 } else if (new_xdst == NULL) { 1711 num_xfrms = 0; 1712 if (oldflo == NULL) 1713 goto make_dummy_bundle; 1714 xdst->num_xfrms = 0; 1715 dst_hold(&xdst->u.dst); 1716 return oldflo; 1717 } 1718 1719 /* Kill the previous bundle */ 1720 if (xdst) { 1721 /* The policies were stolen for newly generated bundle */ 1722 xdst->num_pols = 0; 1723 dst_free(&xdst->u.dst); 1724 } 1725 1726 /* Flow cache does not have reference, it dst_free()'s, 1727 * but we do need to return one reference for original caller */ 1728 dst_hold(&new_xdst->u.dst); 1729 return &new_xdst->flo; 1730 1731 make_dummy_bundle: 1732 /* We found policies, but there's no bundles to instantiate: 1733 * either because the policy blocks, has no transformations or 1734 * we could not build template (no xfrm_states).*/ 1735 xdst = xfrm_alloc_dst(net, family); 1736 if (IS_ERR(xdst)) { 1737 xfrm_pols_put(pols, num_pols); 1738 return ERR_CAST(xdst); 1739 } 1740 xdst->num_pols = num_pols; 1741 xdst->num_xfrms = num_xfrms; 1742 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols); 1743 1744 dst_hold(&xdst->u.dst); 1745 return &xdst->flo; 1746 1747 inc_error: 1748 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); 1749 error: 1750 if (xdst != NULL) 1751 dst_free(&xdst->u.dst); 1752 else 1753 xfrm_pols_put(pols, num_pols); 1754 return ERR_PTR(err); 1755 } 1756 1757 static struct dst_entry *make_blackhole(struct net *net, u16 family, 1758 struct dst_entry *dst_orig) 1759 { 1760 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1761 struct dst_entry *ret; 1762 1763 if (!afinfo) { 1764 dst_release(dst_orig); 1765 ret = ERR_PTR(-EINVAL); 1766 } else { 1767 ret = afinfo->blackhole_route(net, dst_orig); 1768 } 1769 xfrm_policy_put_afinfo(afinfo); 1770 1771 return ret; 1772 } 1773 1774 /* Main function: finds/creates a bundle for given flow. 1775 * 1776 * At the moment we eat a raw IP route. Mostly to speed up lookups 1777 * on interfaces with disabled IPsec. 1778 */ 1779 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, 1780 const struct flowi *fl, 1781 struct sock *sk, int flags) 1782 { 1783 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 1784 struct flow_cache_object *flo; 1785 struct xfrm_dst *xdst; 1786 struct dst_entry *dst, *route; 1787 u16 family = dst_orig->ops->family; 1788 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT); 1789 int i, err, num_pols, num_xfrms = 0, drop_pols = 0; 1790 1791 restart: 1792 dst = NULL; 1793 xdst = NULL; 1794 route = NULL; 1795 1796 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { 1797 num_pols = 1; 1798 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); 1799 err = xfrm_expand_policies(fl, family, pols, 1800 &num_pols, &num_xfrms); 1801 if (err < 0) 1802 goto dropdst; 1803 1804 if (num_pols) { 1805 if (num_xfrms <= 0) { 1806 drop_pols = num_pols; 1807 goto no_transform; 1808 } 1809 1810 xdst = xfrm_resolve_and_create_bundle( 1811 pols, num_pols, fl, 1812 family, dst_orig); 1813 if (IS_ERR(xdst)) { 1814 xfrm_pols_put(pols, num_pols); 1815 err = PTR_ERR(xdst); 1816 goto dropdst; 1817 } else if (xdst == NULL) { 1818 num_xfrms = 0; 1819 drop_pols = num_pols; 1820 goto no_transform; 1821 } 1822 1823 dst_hold(&xdst->u.dst); 1824 1825 spin_lock_bh(&xfrm_policy_sk_bundle_lock); 1826 xdst->u.dst.next = xfrm_policy_sk_bundles; 1827 xfrm_policy_sk_bundles = &xdst->u.dst; 1828 spin_unlock_bh(&xfrm_policy_sk_bundle_lock); 1829 1830 route = xdst->route; 1831 } 1832 } 1833 1834 if (xdst == NULL) { 1835 /* To accelerate a bit... */ 1836 if ((dst_orig->flags & DST_NOXFRM) || 1837 !net->xfrm.policy_count[XFRM_POLICY_OUT]) 1838 goto nopol; 1839 1840 flo = flow_cache_lookup(net, fl, family, dir, 1841 xfrm_bundle_lookup, dst_orig); 1842 if (flo == NULL) 1843 goto nopol; 1844 if (IS_ERR(flo)) { 1845 err = PTR_ERR(flo); 1846 goto dropdst; 1847 } 1848 xdst = container_of(flo, struct xfrm_dst, flo); 1849 1850 num_pols = xdst->num_pols; 1851 num_xfrms = xdst->num_xfrms; 1852 memcpy(pols, xdst->pols, sizeof(struct xfrm_policy*) * num_pols); 1853 route = xdst->route; 1854 } 1855 1856 dst = &xdst->u.dst; 1857 if (route == NULL && num_xfrms > 0) { 1858 /* The only case when xfrm_bundle_lookup() returns a 1859 * bundle with null route, is when the template could 1860 * not be resolved. It means policies are there, but 1861 * bundle could not be created, since we don't yet 1862 * have the xfrm_state's. We need to wait for KM to 1863 * negotiate new SA's or bail out with error.*/ 1864 if (net->xfrm.sysctl_larval_drop) { 1865 /* EREMOTE tells the caller to generate 1866 * a one-shot blackhole route. */ 1867 dst_release(dst); 1868 xfrm_pols_put(pols, drop_pols); 1869 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); 1870 1871 return make_blackhole(net, family, dst_orig); 1872 } 1873 if (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP) { 1874 DECLARE_WAITQUEUE(wait, current); 1875 1876 add_wait_queue(&net->xfrm.km_waitq, &wait); 1877 set_current_state(TASK_INTERRUPTIBLE); 1878 schedule(); 1879 set_current_state(TASK_RUNNING); 1880 remove_wait_queue(&net->xfrm.km_waitq, &wait); 1881 1882 if (!signal_pending(current)) { 1883 dst_release(dst); 1884 goto restart; 1885 } 1886 1887 err = -ERESTART; 1888 } else 1889 err = -EAGAIN; 1890 1891 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); 1892 goto error; 1893 } 1894 1895 no_transform: 1896 if (num_pols == 0) 1897 goto nopol; 1898 1899 if ((flags & XFRM_LOOKUP_ICMP) && 1900 !(pols[0]->flags & XFRM_POLICY_ICMP)) { 1901 err = -ENOENT; 1902 goto error; 1903 } 1904 1905 for (i = 0; i < num_pols; i++) 1906 pols[i]->curlft.use_time = get_seconds(); 1907 1908 if (num_xfrms < 0) { 1909 /* Prohibit the flow */ 1910 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK); 1911 err = -EPERM; 1912 goto error; 1913 } else if (num_xfrms > 0) { 1914 /* Flow transformed */ 1915 dst_release(dst_orig); 1916 } else { 1917 /* Flow passes untransformed */ 1918 dst_release(dst); 1919 dst = dst_orig; 1920 } 1921 ok: 1922 xfrm_pols_put(pols, drop_pols); 1923 return dst; 1924 1925 nopol: 1926 if (!(flags & XFRM_LOOKUP_ICMP)) { 1927 dst = dst_orig; 1928 goto ok; 1929 } 1930 err = -ENOENT; 1931 error: 1932 dst_release(dst); 1933 dropdst: 1934 dst_release(dst_orig); 1935 xfrm_pols_put(pols, drop_pols); 1936 return ERR_PTR(err); 1937 } 1938 EXPORT_SYMBOL(xfrm_lookup); 1939 1940 static inline int 1941 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl) 1942 { 1943 struct xfrm_state *x; 1944 1945 if (!skb->sp || idx < 0 || idx >= skb->sp->len) 1946 return 0; 1947 x = skb->sp->xvec[idx]; 1948 if (!x->type->reject) 1949 return 0; 1950 return x->type->reject(x, skb, fl); 1951 } 1952 1953 /* When skb is transformed back to its "native" form, we have to 1954 * check policy restrictions. At the moment we make this in maximally 1955 * stupid way. Shame on me. :-) Of course, connected sockets must 1956 * have policy cached at them. 1957 */ 1958 1959 static inline int 1960 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, 1961 unsigned short family) 1962 { 1963 if (xfrm_state_kern(x)) 1964 return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family); 1965 return x->id.proto == tmpl->id.proto && 1966 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) && 1967 (x->props.reqid == tmpl->reqid || !tmpl->reqid) && 1968 x->props.mode == tmpl->mode && 1969 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) || 1970 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) && 1971 !(x->props.mode != XFRM_MODE_TRANSPORT && 1972 xfrm_state_addr_cmp(tmpl, x, family)); 1973 } 1974 1975 /* 1976 * 0 or more than 0 is returned when validation is succeeded (either bypass 1977 * because of optional transport mode, or next index of the mathced secpath 1978 * state with the template. 1979 * -1 is returned when no matching template is found. 1980 * Otherwise "-2 - errored_index" is returned. 1981 */ 1982 static inline int 1983 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start, 1984 unsigned short family) 1985 { 1986 int idx = start; 1987 1988 if (tmpl->optional) { 1989 if (tmpl->mode == XFRM_MODE_TRANSPORT) 1990 return start; 1991 } else 1992 start = -1; 1993 for (; idx < sp->len; idx++) { 1994 if (xfrm_state_ok(tmpl, sp->xvec[idx], family)) 1995 return ++idx; 1996 if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) { 1997 if (start == -1) 1998 start = -2-idx; 1999 break; 2000 } 2001 } 2002 return start; 2003 } 2004 2005 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, 2006 unsigned int family, int reverse) 2007 { 2008 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 2009 int err; 2010 2011 if (unlikely(afinfo == NULL)) 2012 return -EAFNOSUPPORT; 2013 2014 afinfo->decode_session(skb, fl, reverse); 2015 err = security_xfrm_decode_session(skb, &fl->flowi_secid); 2016 xfrm_policy_put_afinfo(afinfo); 2017 return err; 2018 } 2019 EXPORT_SYMBOL(__xfrm_decode_session); 2020 2021 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp) 2022 { 2023 for (; k < sp->len; k++) { 2024 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) { 2025 *idxp = k; 2026 return 1; 2027 } 2028 } 2029 2030 return 0; 2031 } 2032 2033 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, 2034 unsigned short family) 2035 { 2036 struct net *net = dev_net(skb->dev); 2037 struct xfrm_policy *pol; 2038 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 2039 int npols = 0; 2040 int xfrm_nr; 2041 int pi; 2042 int reverse; 2043 struct flowi fl; 2044 u8 fl_dir; 2045 int xerr_idx = -1; 2046 2047 reverse = dir & ~XFRM_POLICY_MASK; 2048 dir &= XFRM_POLICY_MASK; 2049 fl_dir = policy_to_flow_dir(dir); 2050 2051 if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) { 2052 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR); 2053 return 0; 2054 } 2055 2056 nf_nat_decode_session(skb, &fl, family); 2057 2058 /* First, check used SA against their selectors. */ 2059 if (skb->sp) { 2060 int i; 2061 2062 for (i=skb->sp->len-1; i>=0; i--) { 2063 struct xfrm_state *x = skb->sp->xvec[i]; 2064 if (!xfrm_selector_match(&x->sel, &fl, family)) { 2065 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH); 2066 return 0; 2067 } 2068 } 2069 } 2070 2071 pol = NULL; 2072 if (sk && sk->sk_policy[dir]) { 2073 pol = xfrm_sk_policy_lookup(sk, dir, &fl); 2074 if (IS_ERR(pol)) { 2075 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 2076 return 0; 2077 } 2078 } 2079 2080 if (!pol) { 2081 struct flow_cache_object *flo; 2082 2083 flo = flow_cache_lookup(net, &fl, family, fl_dir, 2084 xfrm_policy_lookup, NULL); 2085 if (IS_ERR_OR_NULL(flo)) 2086 pol = ERR_CAST(flo); 2087 else 2088 pol = container_of(flo, struct xfrm_policy, flo); 2089 } 2090 2091 if (IS_ERR(pol)) { 2092 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 2093 return 0; 2094 } 2095 2096 if (!pol) { 2097 if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) { 2098 xfrm_secpath_reject(xerr_idx, skb, &fl); 2099 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS); 2100 return 0; 2101 } 2102 return 1; 2103 } 2104 2105 pol->curlft.use_time = get_seconds(); 2106 2107 pols[0] = pol; 2108 npols ++; 2109 #ifdef CONFIG_XFRM_SUB_POLICY 2110 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) { 2111 pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, 2112 &fl, family, 2113 XFRM_POLICY_IN); 2114 if (pols[1]) { 2115 if (IS_ERR(pols[1])) { 2116 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 2117 return 0; 2118 } 2119 pols[1]->curlft.use_time = get_seconds(); 2120 npols ++; 2121 } 2122 } 2123 #endif 2124 2125 if (pol->action == XFRM_POLICY_ALLOW) { 2126 struct sec_path *sp; 2127 static struct sec_path dummy; 2128 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH]; 2129 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH]; 2130 struct xfrm_tmpl **tpp = tp; 2131 int ti = 0; 2132 int i, k; 2133 2134 if ((sp = skb->sp) == NULL) 2135 sp = &dummy; 2136 2137 for (pi = 0; pi < npols; pi++) { 2138 if (pols[pi] != pol && 2139 pols[pi]->action != XFRM_POLICY_ALLOW) { 2140 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK); 2141 goto reject; 2142 } 2143 if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) { 2144 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR); 2145 goto reject_error; 2146 } 2147 for (i = 0; i < pols[pi]->xfrm_nr; i++) 2148 tpp[ti++] = &pols[pi]->xfrm_vec[i]; 2149 } 2150 xfrm_nr = ti; 2151 if (npols > 1) { 2152 xfrm_tmpl_sort(stp, tpp, xfrm_nr, family); 2153 tpp = stp; 2154 } 2155 2156 /* For each tunnel xfrm, find the first matching tmpl. 2157 * For each tmpl before that, find corresponding xfrm. 2158 * Order is _important_. Later we will implement 2159 * some barriers, but at the moment barriers 2160 * are implied between each two transformations. 2161 */ 2162 for (i = xfrm_nr-1, k = 0; i >= 0; i--) { 2163 k = xfrm_policy_ok(tpp[i], sp, k, family); 2164 if (k < 0) { 2165 if (k < -1) 2166 /* "-2 - errored_index" returned */ 2167 xerr_idx = -(2+k); 2168 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH); 2169 goto reject; 2170 } 2171 } 2172 2173 if (secpath_has_nontransport(sp, k, &xerr_idx)) { 2174 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH); 2175 goto reject; 2176 } 2177 2178 xfrm_pols_put(pols, npols); 2179 return 1; 2180 } 2181 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK); 2182 2183 reject: 2184 xfrm_secpath_reject(xerr_idx, skb, &fl); 2185 reject_error: 2186 xfrm_pols_put(pols, npols); 2187 return 0; 2188 } 2189 EXPORT_SYMBOL(__xfrm_policy_check); 2190 2191 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family) 2192 { 2193 struct net *net = dev_net(skb->dev); 2194 struct flowi fl; 2195 struct dst_entry *dst; 2196 int res = 1; 2197 2198 if (xfrm_decode_session(skb, &fl, family) < 0) { 2199 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR); 2200 return 0; 2201 } 2202 2203 skb_dst_force(skb); 2204 2205 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0); 2206 if (IS_ERR(dst)) { 2207 res = 0; 2208 dst = NULL; 2209 } 2210 skb_dst_set(skb, dst); 2211 return res; 2212 } 2213 EXPORT_SYMBOL(__xfrm_route_forward); 2214 2215 /* Optimize later using cookies and generation ids. */ 2216 2217 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie) 2218 { 2219 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete 2220 * to "-1" to force all XFRM destinations to get validated by 2221 * dst_ops->check on every use. We do this because when a 2222 * normal route referenced by an XFRM dst is obsoleted we do 2223 * not go looking around for all parent referencing XFRM dsts 2224 * so that we can invalidate them. It is just too much work. 2225 * Instead we make the checks here on every use. For example: 2226 * 2227 * XFRM dst A --> IPv4 dst X 2228 * 2229 * X is the "xdst->route" of A (X is also the "dst->path" of A 2230 * in this example). If X is marked obsolete, "A" will not 2231 * notice. That's what we are validating here via the 2232 * stale_bundle() check. 2233 * 2234 * When a policy's bundle is pruned, we dst_free() the XFRM 2235 * dst which causes it's ->obsolete field to be set to a 2236 * positive non-zero integer. If an XFRM dst has been pruned 2237 * like this, we want to force a new route lookup. 2238 */ 2239 if (dst->obsolete < 0 && !stale_bundle(dst)) 2240 return dst; 2241 2242 return NULL; 2243 } 2244 2245 static int stale_bundle(struct dst_entry *dst) 2246 { 2247 return !xfrm_bundle_ok((struct xfrm_dst *)dst); 2248 } 2249 2250 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev) 2251 { 2252 while ((dst = dst->child) && dst->xfrm && dst->dev == dev) { 2253 dst->dev = dev_net(dev)->loopback_dev; 2254 dev_hold(dst->dev); 2255 dev_put(dev); 2256 } 2257 } 2258 EXPORT_SYMBOL(xfrm_dst_ifdown); 2259 2260 static void xfrm_link_failure(struct sk_buff *skb) 2261 { 2262 /* Impossible. Such dst must be popped before reaches point of failure. */ 2263 } 2264 2265 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst) 2266 { 2267 if (dst) { 2268 if (dst->obsolete) { 2269 dst_release(dst); 2270 dst = NULL; 2271 } 2272 } 2273 return dst; 2274 } 2275 2276 static void __xfrm_garbage_collect(struct net *net) 2277 { 2278 struct dst_entry *head, *next; 2279 2280 spin_lock_bh(&xfrm_policy_sk_bundle_lock); 2281 head = xfrm_policy_sk_bundles; 2282 xfrm_policy_sk_bundles = NULL; 2283 spin_unlock_bh(&xfrm_policy_sk_bundle_lock); 2284 2285 while (head) { 2286 next = head->next; 2287 dst_free(head); 2288 head = next; 2289 } 2290 } 2291 2292 static void xfrm_garbage_collect(struct net *net) 2293 { 2294 flow_cache_flush(); 2295 __xfrm_garbage_collect(net); 2296 } 2297 2298 static void xfrm_garbage_collect_deferred(struct net *net) 2299 { 2300 flow_cache_flush_deferred(); 2301 __xfrm_garbage_collect(net); 2302 } 2303 2304 static void xfrm_init_pmtu(struct dst_entry *dst) 2305 { 2306 do { 2307 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 2308 u32 pmtu, route_mtu_cached; 2309 2310 pmtu = dst_mtu(dst->child); 2311 xdst->child_mtu_cached = pmtu; 2312 2313 pmtu = xfrm_state_mtu(dst->xfrm, pmtu); 2314 2315 route_mtu_cached = dst_mtu(xdst->route); 2316 xdst->route_mtu_cached = route_mtu_cached; 2317 2318 if (pmtu > route_mtu_cached) 2319 pmtu = route_mtu_cached; 2320 2321 dst_metric_set(dst, RTAX_MTU, pmtu); 2322 } while ((dst = dst->next)); 2323 } 2324 2325 /* Check that the bundle accepts the flow and its components are 2326 * still valid. 2327 */ 2328 2329 static int xfrm_bundle_ok(struct xfrm_dst *first) 2330 { 2331 struct dst_entry *dst = &first->u.dst; 2332 struct xfrm_dst *last; 2333 u32 mtu; 2334 2335 if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) || 2336 (dst->dev && !netif_running(dst->dev))) 2337 return 0; 2338 2339 last = NULL; 2340 2341 do { 2342 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 2343 2344 if (dst->xfrm->km.state != XFRM_STATE_VALID) 2345 return 0; 2346 if (xdst->xfrm_genid != dst->xfrm->genid) 2347 return 0; 2348 if (xdst->num_pols > 0 && 2349 xdst->policy_genid != atomic_read(&xdst->pols[0]->genid)) 2350 return 0; 2351 2352 mtu = dst_mtu(dst->child); 2353 if (xdst->child_mtu_cached != mtu) { 2354 last = xdst; 2355 xdst->child_mtu_cached = mtu; 2356 } 2357 2358 if (!dst_check(xdst->route, xdst->route_cookie)) 2359 return 0; 2360 mtu = dst_mtu(xdst->route); 2361 if (xdst->route_mtu_cached != mtu) { 2362 last = xdst; 2363 xdst->route_mtu_cached = mtu; 2364 } 2365 2366 dst = dst->child; 2367 } while (dst->xfrm); 2368 2369 if (likely(!last)) 2370 return 1; 2371 2372 mtu = last->child_mtu_cached; 2373 for (;;) { 2374 dst = &last->u.dst; 2375 2376 mtu = xfrm_state_mtu(dst->xfrm, mtu); 2377 if (mtu > last->route_mtu_cached) 2378 mtu = last->route_mtu_cached; 2379 dst_metric_set(dst, RTAX_MTU, mtu); 2380 2381 if (last == first) 2382 break; 2383 2384 last = (struct xfrm_dst *)last->u.dst.next; 2385 last->child_mtu_cached = mtu; 2386 } 2387 2388 return 1; 2389 } 2390 2391 static unsigned int xfrm_default_advmss(const struct dst_entry *dst) 2392 { 2393 return dst_metric_advmss(dst->path); 2394 } 2395 2396 static unsigned int xfrm_mtu(const struct dst_entry *dst) 2397 { 2398 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); 2399 2400 return mtu ? : dst_mtu(dst->path); 2401 } 2402 2403 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, const void *daddr) 2404 { 2405 return dst_neigh_lookup(dst->path, daddr); 2406 } 2407 2408 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) 2409 { 2410 struct net *net; 2411 int err = 0; 2412 if (unlikely(afinfo == NULL)) 2413 return -EINVAL; 2414 if (unlikely(afinfo->family >= NPROTO)) 2415 return -EAFNOSUPPORT; 2416 write_lock_bh(&xfrm_policy_afinfo_lock); 2417 if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL)) 2418 err = -ENOBUFS; 2419 else { 2420 struct dst_ops *dst_ops = afinfo->dst_ops; 2421 if (likely(dst_ops->kmem_cachep == NULL)) 2422 dst_ops->kmem_cachep = xfrm_dst_cache; 2423 if (likely(dst_ops->check == NULL)) 2424 dst_ops->check = xfrm_dst_check; 2425 if (likely(dst_ops->default_advmss == NULL)) 2426 dst_ops->default_advmss = xfrm_default_advmss; 2427 if (likely(dst_ops->mtu == NULL)) 2428 dst_ops->mtu = xfrm_mtu; 2429 if (likely(dst_ops->negative_advice == NULL)) 2430 dst_ops->negative_advice = xfrm_negative_advice; 2431 if (likely(dst_ops->link_failure == NULL)) 2432 dst_ops->link_failure = xfrm_link_failure; 2433 if (likely(dst_ops->neigh_lookup == NULL)) 2434 dst_ops->neigh_lookup = xfrm_neigh_lookup; 2435 if (likely(afinfo->garbage_collect == NULL)) 2436 afinfo->garbage_collect = xfrm_garbage_collect_deferred; 2437 xfrm_policy_afinfo[afinfo->family] = afinfo; 2438 } 2439 write_unlock_bh(&xfrm_policy_afinfo_lock); 2440 2441 rtnl_lock(); 2442 for_each_net(net) { 2443 struct dst_ops *xfrm_dst_ops; 2444 2445 switch (afinfo->family) { 2446 case AF_INET: 2447 xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops; 2448 break; 2449 #if IS_ENABLED(CONFIG_IPV6) 2450 case AF_INET6: 2451 xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops; 2452 break; 2453 #endif 2454 default: 2455 BUG(); 2456 } 2457 *xfrm_dst_ops = *afinfo->dst_ops; 2458 } 2459 rtnl_unlock(); 2460 2461 return err; 2462 } 2463 EXPORT_SYMBOL(xfrm_policy_register_afinfo); 2464 2465 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo) 2466 { 2467 int err = 0; 2468 if (unlikely(afinfo == NULL)) 2469 return -EINVAL; 2470 if (unlikely(afinfo->family >= NPROTO)) 2471 return -EAFNOSUPPORT; 2472 write_lock_bh(&xfrm_policy_afinfo_lock); 2473 if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) { 2474 if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo)) 2475 err = -EINVAL; 2476 else { 2477 struct dst_ops *dst_ops = afinfo->dst_ops; 2478 xfrm_policy_afinfo[afinfo->family] = NULL; 2479 dst_ops->kmem_cachep = NULL; 2480 dst_ops->check = NULL; 2481 dst_ops->negative_advice = NULL; 2482 dst_ops->link_failure = NULL; 2483 afinfo->garbage_collect = NULL; 2484 } 2485 } 2486 write_unlock_bh(&xfrm_policy_afinfo_lock); 2487 return err; 2488 } 2489 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); 2490 2491 static void __net_init xfrm_dst_ops_init(struct net *net) 2492 { 2493 struct xfrm_policy_afinfo *afinfo; 2494 2495 read_lock_bh(&xfrm_policy_afinfo_lock); 2496 afinfo = xfrm_policy_afinfo[AF_INET]; 2497 if (afinfo) 2498 net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops; 2499 #if IS_ENABLED(CONFIG_IPV6) 2500 afinfo = xfrm_policy_afinfo[AF_INET6]; 2501 if (afinfo) 2502 net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops; 2503 #endif 2504 read_unlock_bh(&xfrm_policy_afinfo_lock); 2505 } 2506 2507 static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family) 2508 { 2509 struct xfrm_policy_afinfo *afinfo; 2510 if (unlikely(family >= NPROTO)) 2511 return NULL; 2512 read_lock(&xfrm_policy_afinfo_lock); 2513 afinfo = xfrm_policy_afinfo[family]; 2514 if (unlikely(!afinfo)) 2515 read_unlock(&xfrm_policy_afinfo_lock); 2516 return afinfo; 2517 } 2518 2519 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo) 2520 { 2521 read_unlock(&xfrm_policy_afinfo_lock); 2522 } 2523 2524 static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr) 2525 { 2526 struct net_device *dev = ptr; 2527 2528 switch (event) { 2529 case NETDEV_DOWN: 2530 xfrm_garbage_collect(dev_net(dev)); 2531 } 2532 return NOTIFY_DONE; 2533 } 2534 2535 static struct notifier_block xfrm_dev_notifier = { 2536 .notifier_call = xfrm_dev_event, 2537 }; 2538 2539 #ifdef CONFIG_XFRM_STATISTICS 2540 static int __net_init xfrm_statistics_init(struct net *net) 2541 { 2542 int rv; 2543 2544 if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics, 2545 sizeof(struct linux_xfrm_mib), 2546 __alignof__(struct linux_xfrm_mib)) < 0) 2547 return -ENOMEM; 2548 rv = xfrm_proc_init(net); 2549 if (rv < 0) 2550 snmp_mib_free((void __percpu **)net->mib.xfrm_statistics); 2551 return rv; 2552 } 2553 2554 static void xfrm_statistics_fini(struct net *net) 2555 { 2556 xfrm_proc_fini(net); 2557 snmp_mib_free((void __percpu **)net->mib.xfrm_statistics); 2558 } 2559 #else 2560 static int __net_init xfrm_statistics_init(struct net *net) 2561 { 2562 return 0; 2563 } 2564 2565 static void xfrm_statistics_fini(struct net *net) 2566 { 2567 } 2568 #endif 2569 2570 static int __net_init xfrm_policy_init(struct net *net) 2571 { 2572 unsigned int hmask, sz; 2573 int dir; 2574 2575 if (net_eq(net, &init_net)) 2576 xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache", 2577 sizeof(struct xfrm_dst), 2578 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 2579 NULL); 2580 2581 hmask = 8 - 1; 2582 sz = (hmask+1) * sizeof(struct hlist_head); 2583 2584 net->xfrm.policy_byidx = xfrm_hash_alloc(sz); 2585 if (!net->xfrm.policy_byidx) 2586 goto out_byidx; 2587 net->xfrm.policy_idx_hmask = hmask; 2588 2589 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) { 2590 struct xfrm_policy_hash *htab; 2591 2592 net->xfrm.policy_count[dir] = 0; 2593 INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]); 2594 2595 htab = &net->xfrm.policy_bydst[dir]; 2596 htab->table = xfrm_hash_alloc(sz); 2597 if (!htab->table) 2598 goto out_bydst; 2599 htab->hmask = hmask; 2600 } 2601 2602 INIT_LIST_HEAD(&net->xfrm.policy_all); 2603 INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize); 2604 if (net_eq(net, &init_net)) 2605 register_netdevice_notifier(&xfrm_dev_notifier); 2606 return 0; 2607 2608 out_bydst: 2609 for (dir--; dir >= 0; dir--) { 2610 struct xfrm_policy_hash *htab; 2611 2612 htab = &net->xfrm.policy_bydst[dir]; 2613 xfrm_hash_free(htab->table, sz); 2614 } 2615 xfrm_hash_free(net->xfrm.policy_byidx, sz); 2616 out_byidx: 2617 return -ENOMEM; 2618 } 2619 2620 static void xfrm_policy_fini(struct net *net) 2621 { 2622 struct xfrm_audit audit_info; 2623 unsigned int sz; 2624 int dir; 2625 2626 flush_work(&net->xfrm.policy_hash_work); 2627 #ifdef CONFIG_XFRM_SUB_POLICY 2628 audit_info.loginuid = -1; 2629 audit_info.sessionid = -1; 2630 audit_info.secid = 0; 2631 xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info); 2632 #endif 2633 audit_info.loginuid = -1; 2634 audit_info.sessionid = -1; 2635 audit_info.secid = 0; 2636 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info); 2637 2638 WARN_ON(!list_empty(&net->xfrm.policy_all)); 2639 2640 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) { 2641 struct xfrm_policy_hash *htab; 2642 2643 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir])); 2644 2645 htab = &net->xfrm.policy_bydst[dir]; 2646 sz = (htab->hmask + 1); 2647 WARN_ON(!hlist_empty(htab->table)); 2648 xfrm_hash_free(htab->table, sz); 2649 } 2650 2651 sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head); 2652 WARN_ON(!hlist_empty(net->xfrm.policy_byidx)); 2653 xfrm_hash_free(net->xfrm.policy_byidx, sz); 2654 } 2655 2656 static int __net_init xfrm_net_init(struct net *net) 2657 { 2658 int rv; 2659 2660 rv = xfrm_statistics_init(net); 2661 if (rv < 0) 2662 goto out_statistics; 2663 rv = xfrm_state_init(net); 2664 if (rv < 0) 2665 goto out_state; 2666 rv = xfrm_policy_init(net); 2667 if (rv < 0) 2668 goto out_policy; 2669 xfrm_dst_ops_init(net); 2670 rv = xfrm_sysctl_init(net); 2671 if (rv < 0) 2672 goto out_sysctl; 2673 return 0; 2674 2675 out_sysctl: 2676 xfrm_policy_fini(net); 2677 out_policy: 2678 xfrm_state_fini(net); 2679 out_state: 2680 xfrm_statistics_fini(net); 2681 out_statistics: 2682 return rv; 2683 } 2684 2685 static void __net_exit xfrm_net_exit(struct net *net) 2686 { 2687 xfrm_sysctl_fini(net); 2688 xfrm_policy_fini(net); 2689 xfrm_state_fini(net); 2690 xfrm_statistics_fini(net); 2691 } 2692 2693 static struct pernet_operations __net_initdata xfrm_net_ops = { 2694 .init = xfrm_net_init, 2695 .exit = xfrm_net_exit, 2696 }; 2697 2698 void __init xfrm_init(void) 2699 { 2700 register_pernet_subsys(&xfrm_net_ops); 2701 xfrm_input_init(); 2702 } 2703 2704 #ifdef CONFIG_AUDITSYSCALL 2705 static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp, 2706 struct audit_buffer *audit_buf) 2707 { 2708 struct xfrm_sec_ctx *ctx = xp->security; 2709 struct xfrm_selector *sel = &xp->selector; 2710 2711 if (ctx) 2712 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s", 2713 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str); 2714 2715 switch(sel->family) { 2716 case AF_INET: 2717 audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4); 2718 if (sel->prefixlen_s != 32) 2719 audit_log_format(audit_buf, " src_prefixlen=%d", 2720 sel->prefixlen_s); 2721 audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4); 2722 if (sel->prefixlen_d != 32) 2723 audit_log_format(audit_buf, " dst_prefixlen=%d", 2724 sel->prefixlen_d); 2725 break; 2726 case AF_INET6: 2727 audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6); 2728 if (sel->prefixlen_s != 128) 2729 audit_log_format(audit_buf, " src_prefixlen=%d", 2730 sel->prefixlen_s); 2731 audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6); 2732 if (sel->prefixlen_d != 128) 2733 audit_log_format(audit_buf, " dst_prefixlen=%d", 2734 sel->prefixlen_d); 2735 break; 2736 } 2737 } 2738 2739 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, 2740 uid_t auid, u32 sessionid, u32 secid) 2741 { 2742 struct audit_buffer *audit_buf; 2743 2744 audit_buf = xfrm_audit_start("SPD-add"); 2745 if (audit_buf == NULL) 2746 return; 2747 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf); 2748 audit_log_format(audit_buf, " res=%u", result); 2749 xfrm_audit_common_policyinfo(xp, audit_buf); 2750 audit_log_end(audit_buf); 2751 } 2752 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add); 2753 2754 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, 2755 uid_t auid, u32 sessionid, u32 secid) 2756 { 2757 struct audit_buffer *audit_buf; 2758 2759 audit_buf = xfrm_audit_start("SPD-delete"); 2760 if (audit_buf == NULL) 2761 return; 2762 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf); 2763 audit_log_format(audit_buf, " res=%u", result); 2764 xfrm_audit_common_policyinfo(xp, audit_buf); 2765 audit_log_end(audit_buf); 2766 } 2767 EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete); 2768 #endif 2769 2770 #ifdef CONFIG_XFRM_MIGRATE 2771 static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp, 2772 const struct xfrm_selector *sel_tgt) 2773 { 2774 if (sel_cmp->proto == IPSEC_ULPROTO_ANY) { 2775 if (sel_tgt->family == sel_cmp->family && 2776 xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr, 2777 sel_cmp->family) == 0 && 2778 xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr, 2779 sel_cmp->family) == 0 && 2780 sel_tgt->prefixlen_d == sel_cmp->prefixlen_d && 2781 sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) { 2782 return true; 2783 } 2784 } else { 2785 if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) { 2786 return true; 2787 } 2788 } 2789 return false; 2790 } 2791 2792 static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector *sel, 2793 u8 dir, u8 type) 2794 { 2795 struct xfrm_policy *pol, *ret = NULL; 2796 struct hlist_node *entry; 2797 struct hlist_head *chain; 2798 u32 priority = ~0U; 2799 2800 read_lock_bh(&xfrm_policy_lock); 2801 chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir); 2802 hlist_for_each_entry(pol, entry, chain, bydst) { 2803 if (xfrm_migrate_selector_match(sel, &pol->selector) && 2804 pol->type == type) { 2805 ret = pol; 2806 priority = ret->priority; 2807 break; 2808 } 2809 } 2810 chain = &init_net.xfrm.policy_inexact[dir]; 2811 hlist_for_each_entry(pol, entry, chain, bydst) { 2812 if (xfrm_migrate_selector_match(sel, &pol->selector) && 2813 pol->type == type && 2814 pol->priority < priority) { 2815 ret = pol; 2816 break; 2817 } 2818 } 2819 2820 if (ret) 2821 xfrm_pol_hold(ret); 2822 2823 read_unlock_bh(&xfrm_policy_lock); 2824 2825 return ret; 2826 } 2827 2828 static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t) 2829 { 2830 int match = 0; 2831 2832 if (t->mode == m->mode && t->id.proto == m->proto && 2833 (m->reqid == 0 || t->reqid == m->reqid)) { 2834 switch (t->mode) { 2835 case XFRM_MODE_TUNNEL: 2836 case XFRM_MODE_BEET: 2837 if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr, 2838 m->old_family) == 0 && 2839 xfrm_addr_cmp(&t->saddr, &m->old_saddr, 2840 m->old_family) == 0) { 2841 match = 1; 2842 } 2843 break; 2844 case XFRM_MODE_TRANSPORT: 2845 /* in case of transport mode, template does not store 2846 any IP addresses, hence we just compare mode and 2847 protocol */ 2848 match = 1; 2849 break; 2850 default: 2851 break; 2852 } 2853 } 2854 return match; 2855 } 2856 2857 /* update endpoint address(es) of template(s) */ 2858 static int xfrm_policy_migrate(struct xfrm_policy *pol, 2859 struct xfrm_migrate *m, int num_migrate) 2860 { 2861 struct xfrm_migrate *mp; 2862 int i, j, n = 0; 2863 2864 write_lock_bh(&pol->lock); 2865 if (unlikely(pol->walk.dead)) { 2866 /* target policy has been deleted */ 2867 write_unlock_bh(&pol->lock); 2868 return -ENOENT; 2869 } 2870 2871 for (i = 0; i < pol->xfrm_nr; i++) { 2872 for (j = 0, mp = m; j < num_migrate; j++, mp++) { 2873 if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i])) 2874 continue; 2875 n++; 2876 if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL && 2877 pol->xfrm_vec[i].mode != XFRM_MODE_BEET) 2878 continue; 2879 /* update endpoints */ 2880 memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr, 2881 sizeof(pol->xfrm_vec[i].id.daddr)); 2882 memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr, 2883 sizeof(pol->xfrm_vec[i].saddr)); 2884 pol->xfrm_vec[i].encap_family = mp->new_family; 2885 /* flush bundles */ 2886 atomic_inc(&pol->genid); 2887 } 2888 } 2889 2890 write_unlock_bh(&pol->lock); 2891 2892 if (!n) 2893 return -ENODATA; 2894 2895 return 0; 2896 } 2897 2898 static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate) 2899 { 2900 int i, j; 2901 2902 if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH) 2903 return -EINVAL; 2904 2905 for (i = 0; i < num_migrate; i++) { 2906 if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr, 2907 m[i].old_family) == 0) && 2908 (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr, 2909 m[i].old_family) == 0)) 2910 return -EINVAL; 2911 if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) || 2912 xfrm_addr_any(&m[i].new_saddr, m[i].new_family)) 2913 return -EINVAL; 2914 2915 /* check if there is any duplicated entry */ 2916 for (j = i + 1; j < num_migrate; j++) { 2917 if (!memcmp(&m[i].old_daddr, &m[j].old_daddr, 2918 sizeof(m[i].old_daddr)) && 2919 !memcmp(&m[i].old_saddr, &m[j].old_saddr, 2920 sizeof(m[i].old_saddr)) && 2921 m[i].proto == m[j].proto && 2922 m[i].mode == m[j].mode && 2923 m[i].reqid == m[j].reqid && 2924 m[i].old_family == m[j].old_family) 2925 return -EINVAL; 2926 } 2927 } 2928 2929 return 0; 2930 } 2931 2932 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, 2933 struct xfrm_migrate *m, int num_migrate, 2934 struct xfrm_kmaddress *k) 2935 { 2936 int i, err, nx_cur = 0, nx_new = 0; 2937 struct xfrm_policy *pol = NULL; 2938 struct xfrm_state *x, *xc; 2939 struct xfrm_state *x_cur[XFRM_MAX_DEPTH]; 2940 struct xfrm_state *x_new[XFRM_MAX_DEPTH]; 2941 struct xfrm_migrate *mp; 2942 2943 if ((err = xfrm_migrate_check(m, num_migrate)) < 0) 2944 goto out; 2945 2946 /* Stage 1 - find policy */ 2947 if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) { 2948 err = -ENOENT; 2949 goto out; 2950 } 2951 2952 /* Stage 2 - find and update state(s) */ 2953 for (i = 0, mp = m; i < num_migrate; i++, mp++) { 2954 if ((x = xfrm_migrate_state_find(mp))) { 2955 x_cur[nx_cur] = x; 2956 nx_cur++; 2957 if ((xc = xfrm_state_migrate(x, mp))) { 2958 x_new[nx_new] = xc; 2959 nx_new++; 2960 } else { 2961 err = -ENODATA; 2962 goto restore_state; 2963 } 2964 } 2965 } 2966 2967 /* Stage 3 - update policy */ 2968 if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0) 2969 goto restore_state; 2970 2971 /* Stage 4 - delete old state(s) */ 2972 if (nx_cur) { 2973 xfrm_states_put(x_cur, nx_cur); 2974 xfrm_states_delete(x_cur, nx_cur); 2975 } 2976 2977 /* Stage 5 - announce */ 2978 km_migrate(sel, dir, type, m, num_migrate, k); 2979 2980 xfrm_pol_put(pol); 2981 2982 return 0; 2983 out: 2984 return err; 2985 2986 restore_state: 2987 if (pol) 2988 xfrm_pol_put(pol); 2989 if (nx_cur) 2990 xfrm_states_put(x_cur, nx_cur); 2991 if (nx_new) 2992 xfrm_states_delete(x_new, nx_new); 2993 2994 return err; 2995 } 2996 EXPORT_SYMBOL(xfrm_migrate); 2997 #endif 2998