1 /* 2 * xfrm_policy.c 3 * 4 * Changes: 5 * Mitsuru KANDA @USAGI 6 * Kazunori MIYAZAWA @USAGI 7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com> 8 * IPv6 support 9 * Kazunori MIYAZAWA @USAGI 10 * YOSHIFUJI Hideaki 11 * Split up af-specific portion 12 * Derek Atkins <derek@ihtfp.com> Add the post_input processor 13 * 14 */ 15 16 #include <linux/err.h> 17 #include <linux/slab.h> 18 #include <linux/kmod.h> 19 #include <linux/list.h> 20 #include <linux/spinlock.h> 21 #include <linux/workqueue.h> 22 #include <linux/notifier.h> 23 #include <linux/netdevice.h> 24 #include <linux/netfilter.h> 25 #include <linux/module.h> 26 #include <linux/cache.h> 27 #include <linux/audit.h> 28 #include <net/dst.h> 29 #include <net/flow.h> 30 #include <net/xfrm.h> 31 #include <net/ip.h> 32 #ifdef CONFIG_XFRM_STATISTICS 33 #include <net/snmp.h> 34 #endif 35 36 #include "xfrm_hash.h" 37 38 DEFINE_MUTEX(xfrm_cfg_mutex); 39 EXPORT_SYMBOL(xfrm_cfg_mutex); 40 41 static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock); 42 static struct dst_entry *xfrm_policy_sk_bundles; 43 static DEFINE_RWLOCK(xfrm_policy_lock); 44 45 static DEFINE_RWLOCK(xfrm_policy_afinfo_lock); 46 static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO]; 47 48 static struct kmem_cache *xfrm_dst_cache __read_mostly; 49 50 static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family); 51 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo); 52 static void xfrm_init_pmtu(struct dst_entry *dst); 53 static int stale_bundle(struct dst_entry *dst); 54 static int xfrm_bundle_ok(struct xfrm_dst *xdst); 55 56 57 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, 58 int dir); 59 60 static inline bool 61 __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl) 62 { 63 const struct flowi4 *fl4 = &fl->u.ip4; 64 65 return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) && 66 addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) && 67 !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) && 68 !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) && 69 (fl4->flowi4_proto == sel->proto || !sel->proto) && 70 (fl4->flowi4_oif == sel->ifindex || !sel->ifindex); 71 } 72 73 static inline bool 74 __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl) 75 { 76 const struct flowi6 *fl6 = &fl->u.ip6; 77 78 return addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) && 79 addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) && 80 !((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) && 81 !((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) && 82 (fl6->flowi6_proto == sel->proto || !sel->proto) && 83 (fl6->flowi6_oif == sel->ifindex || !sel->ifindex); 84 } 85 86 bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl, 87 unsigned short family) 88 { 89 switch (family) { 90 case AF_INET: 91 return __xfrm4_selector_match(sel, fl); 92 case AF_INET6: 93 return __xfrm6_selector_match(sel, fl); 94 } 95 return false; 96 } 97 98 static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, 99 const xfrm_address_t *saddr, 100 const xfrm_address_t *daddr, 101 int family) 102 { 103 struct xfrm_policy_afinfo *afinfo; 104 struct dst_entry *dst; 105 106 afinfo = xfrm_policy_get_afinfo(family); 107 if (unlikely(afinfo == NULL)) 108 return ERR_PTR(-EAFNOSUPPORT); 109 110 dst = afinfo->dst_lookup(net, tos, saddr, daddr); 111 112 xfrm_policy_put_afinfo(afinfo); 113 114 return dst; 115 } 116 117 static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos, 118 xfrm_address_t *prev_saddr, 119 xfrm_address_t *prev_daddr, 120 int family) 121 { 122 struct net *net = xs_net(x); 123 xfrm_address_t *saddr = &x->props.saddr; 124 xfrm_address_t *daddr = &x->id.daddr; 125 struct dst_entry *dst; 126 127 if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) { 128 saddr = x->coaddr; 129 daddr = prev_daddr; 130 } 131 if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) { 132 saddr = prev_saddr; 133 daddr = x->coaddr; 134 } 135 136 dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family); 137 138 if (!IS_ERR(dst)) { 139 if (prev_saddr != saddr) 140 memcpy(prev_saddr, saddr, sizeof(*prev_saddr)); 141 if (prev_daddr != daddr) 142 memcpy(prev_daddr, daddr, sizeof(*prev_daddr)); 143 } 144 145 return dst; 146 } 147 148 static inline unsigned long make_jiffies(long secs) 149 { 150 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ) 151 return MAX_SCHEDULE_TIMEOUT-1; 152 else 153 return secs*HZ; 154 } 155 156 static void xfrm_policy_timer(unsigned long data) 157 { 158 struct xfrm_policy *xp = (struct xfrm_policy*)data; 159 unsigned long now = get_seconds(); 160 long next = LONG_MAX; 161 int warn = 0; 162 int dir; 163 164 read_lock(&xp->lock); 165 166 if (unlikely(xp->walk.dead)) 167 goto out; 168 169 dir = xfrm_policy_id2dir(xp->index); 170 171 if (xp->lft.hard_add_expires_seconds) { 172 long tmo = xp->lft.hard_add_expires_seconds + 173 xp->curlft.add_time - now; 174 if (tmo <= 0) 175 goto expired; 176 if (tmo < next) 177 next = tmo; 178 } 179 if (xp->lft.hard_use_expires_seconds) { 180 long tmo = xp->lft.hard_use_expires_seconds + 181 (xp->curlft.use_time ? : xp->curlft.add_time) - now; 182 if (tmo <= 0) 183 goto expired; 184 if (tmo < next) 185 next = tmo; 186 } 187 if (xp->lft.soft_add_expires_seconds) { 188 long tmo = xp->lft.soft_add_expires_seconds + 189 xp->curlft.add_time - now; 190 if (tmo <= 0) { 191 warn = 1; 192 tmo = XFRM_KM_TIMEOUT; 193 } 194 if (tmo < next) 195 next = tmo; 196 } 197 if (xp->lft.soft_use_expires_seconds) { 198 long tmo = xp->lft.soft_use_expires_seconds + 199 (xp->curlft.use_time ? : xp->curlft.add_time) - now; 200 if (tmo <= 0) { 201 warn = 1; 202 tmo = XFRM_KM_TIMEOUT; 203 } 204 if (tmo < next) 205 next = tmo; 206 } 207 208 if (warn) 209 km_policy_expired(xp, dir, 0, 0); 210 if (next != LONG_MAX && 211 !mod_timer(&xp->timer, jiffies + make_jiffies(next))) 212 xfrm_pol_hold(xp); 213 214 out: 215 read_unlock(&xp->lock); 216 xfrm_pol_put(xp); 217 return; 218 219 expired: 220 read_unlock(&xp->lock); 221 if (!xfrm_policy_delete(xp, dir)) 222 km_policy_expired(xp, dir, 1, 0); 223 xfrm_pol_put(xp); 224 } 225 226 static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo) 227 { 228 struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo); 229 230 if (unlikely(pol->walk.dead)) 231 flo = NULL; 232 else 233 xfrm_pol_hold(pol); 234 235 return flo; 236 } 237 238 static int xfrm_policy_flo_check(struct flow_cache_object *flo) 239 { 240 struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo); 241 242 return !pol->walk.dead; 243 } 244 245 static void xfrm_policy_flo_delete(struct flow_cache_object *flo) 246 { 247 xfrm_pol_put(container_of(flo, struct xfrm_policy, flo)); 248 } 249 250 static const struct flow_cache_ops xfrm_policy_fc_ops = { 251 .get = xfrm_policy_flo_get, 252 .check = xfrm_policy_flo_check, 253 .delete = xfrm_policy_flo_delete, 254 }; 255 256 /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2 257 * SPD calls. 258 */ 259 260 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp) 261 { 262 struct xfrm_policy *policy; 263 264 policy = kzalloc(sizeof(struct xfrm_policy), gfp); 265 266 if (policy) { 267 write_pnet(&policy->xp_net, net); 268 INIT_LIST_HEAD(&policy->walk.all); 269 INIT_HLIST_NODE(&policy->bydst); 270 INIT_HLIST_NODE(&policy->byidx); 271 rwlock_init(&policy->lock); 272 atomic_set(&policy->refcnt, 1); 273 setup_timer(&policy->timer, xfrm_policy_timer, 274 (unsigned long)policy); 275 policy->flo.ops = &xfrm_policy_fc_ops; 276 } 277 return policy; 278 } 279 EXPORT_SYMBOL(xfrm_policy_alloc); 280 281 /* Destroy xfrm_policy: descendant resources must be released to this moment. */ 282 283 void xfrm_policy_destroy(struct xfrm_policy *policy) 284 { 285 BUG_ON(!policy->walk.dead); 286 287 if (del_timer(&policy->timer)) 288 BUG(); 289 290 security_xfrm_policy_free(policy->security); 291 kfree(policy); 292 } 293 EXPORT_SYMBOL(xfrm_policy_destroy); 294 295 /* Rule must be locked. Release descentant resources, announce 296 * entry dead. The rule must be unlinked from lists to the moment. 297 */ 298 299 static void xfrm_policy_kill(struct xfrm_policy *policy) 300 { 301 policy->walk.dead = 1; 302 303 atomic_inc(&policy->genid); 304 305 if (del_timer(&policy->timer)) 306 xfrm_pol_put(policy); 307 308 xfrm_pol_put(policy); 309 } 310 311 static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024; 312 313 static inline unsigned int idx_hash(struct net *net, u32 index) 314 { 315 return __idx_hash(index, net->xfrm.policy_idx_hmask); 316 } 317 318 static struct hlist_head *policy_hash_bysel(struct net *net, 319 const struct xfrm_selector *sel, 320 unsigned short family, int dir) 321 { 322 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; 323 unsigned int hash = __sel_hash(sel, family, hmask); 324 325 return (hash == hmask + 1 ? 326 &net->xfrm.policy_inexact[dir] : 327 net->xfrm.policy_bydst[dir].table + hash); 328 } 329 330 static struct hlist_head *policy_hash_direct(struct net *net, 331 const xfrm_address_t *daddr, 332 const xfrm_address_t *saddr, 333 unsigned short family, int dir) 334 { 335 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; 336 unsigned int hash = __addr_hash(daddr, saddr, family, hmask); 337 338 return net->xfrm.policy_bydst[dir].table + hash; 339 } 340 341 static void xfrm_dst_hash_transfer(struct hlist_head *list, 342 struct hlist_head *ndsttable, 343 unsigned int nhashmask) 344 { 345 struct hlist_node *entry, *tmp, *entry0 = NULL; 346 struct xfrm_policy *pol; 347 unsigned int h0 = 0; 348 349 redo: 350 hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) { 351 unsigned int h; 352 353 h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr, 354 pol->family, nhashmask); 355 if (!entry0) { 356 hlist_del(entry); 357 hlist_add_head(&pol->bydst, ndsttable+h); 358 h0 = h; 359 } else { 360 if (h != h0) 361 continue; 362 hlist_del(entry); 363 hlist_add_after(entry0, &pol->bydst); 364 } 365 entry0 = entry; 366 } 367 if (!hlist_empty(list)) { 368 entry0 = NULL; 369 goto redo; 370 } 371 } 372 373 static void xfrm_idx_hash_transfer(struct hlist_head *list, 374 struct hlist_head *nidxtable, 375 unsigned int nhashmask) 376 { 377 struct hlist_node *entry, *tmp; 378 struct xfrm_policy *pol; 379 380 hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) { 381 unsigned int h; 382 383 h = __idx_hash(pol->index, nhashmask); 384 hlist_add_head(&pol->byidx, nidxtable+h); 385 } 386 } 387 388 static unsigned long xfrm_new_hash_mask(unsigned int old_hmask) 389 { 390 return ((old_hmask + 1) << 1) - 1; 391 } 392 393 static void xfrm_bydst_resize(struct net *net, int dir) 394 { 395 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; 396 unsigned int nhashmask = xfrm_new_hash_mask(hmask); 397 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head); 398 struct hlist_head *odst = net->xfrm.policy_bydst[dir].table; 399 struct hlist_head *ndst = xfrm_hash_alloc(nsize); 400 int i; 401 402 if (!ndst) 403 return; 404 405 write_lock_bh(&xfrm_policy_lock); 406 407 for (i = hmask; i >= 0; i--) 408 xfrm_dst_hash_transfer(odst + i, ndst, nhashmask); 409 410 net->xfrm.policy_bydst[dir].table = ndst; 411 net->xfrm.policy_bydst[dir].hmask = nhashmask; 412 413 write_unlock_bh(&xfrm_policy_lock); 414 415 xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head)); 416 } 417 418 static void xfrm_byidx_resize(struct net *net, int total) 419 { 420 unsigned int hmask = net->xfrm.policy_idx_hmask; 421 unsigned int nhashmask = xfrm_new_hash_mask(hmask); 422 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head); 423 struct hlist_head *oidx = net->xfrm.policy_byidx; 424 struct hlist_head *nidx = xfrm_hash_alloc(nsize); 425 int i; 426 427 if (!nidx) 428 return; 429 430 write_lock_bh(&xfrm_policy_lock); 431 432 for (i = hmask; i >= 0; i--) 433 xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask); 434 435 net->xfrm.policy_byidx = nidx; 436 net->xfrm.policy_idx_hmask = nhashmask; 437 438 write_unlock_bh(&xfrm_policy_lock); 439 440 xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head)); 441 } 442 443 static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total) 444 { 445 unsigned int cnt = net->xfrm.policy_count[dir]; 446 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; 447 448 if (total) 449 *total += cnt; 450 451 if ((hmask + 1) < xfrm_policy_hashmax && 452 cnt > hmask) 453 return 1; 454 455 return 0; 456 } 457 458 static inline int xfrm_byidx_should_resize(struct net *net, int total) 459 { 460 unsigned int hmask = net->xfrm.policy_idx_hmask; 461 462 if ((hmask + 1) < xfrm_policy_hashmax && 463 total > hmask) 464 return 1; 465 466 return 0; 467 } 468 469 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si) 470 { 471 read_lock_bh(&xfrm_policy_lock); 472 si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN]; 473 si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT]; 474 si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD]; 475 si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX]; 476 si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX]; 477 si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX]; 478 si->spdhcnt = net->xfrm.policy_idx_hmask; 479 si->spdhmcnt = xfrm_policy_hashmax; 480 read_unlock_bh(&xfrm_policy_lock); 481 } 482 EXPORT_SYMBOL(xfrm_spd_getinfo); 483 484 static DEFINE_MUTEX(hash_resize_mutex); 485 static void xfrm_hash_resize(struct work_struct *work) 486 { 487 struct net *net = container_of(work, struct net, xfrm.policy_hash_work); 488 int dir, total; 489 490 mutex_lock(&hash_resize_mutex); 491 492 total = 0; 493 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) { 494 if (xfrm_bydst_should_resize(net, dir, &total)) 495 xfrm_bydst_resize(net, dir); 496 } 497 if (xfrm_byidx_should_resize(net, total)) 498 xfrm_byidx_resize(net, total); 499 500 mutex_unlock(&hash_resize_mutex); 501 } 502 503 /* Generate new index... KAME seems to generate them ordered by cost 504 * of an absolute inpredictability of ordering of rules. This will not pass. */ 505 static u32 xfrm_gen_index(struct net *net, int dir) 506 { 507 static u32 idx_generator; 508 509 for (;;) { 510 struct hlist_node *entry; 511 struct hlist_head *list; 512 struct xfrm_policy *p; 513 u32 idx; 514 int found; 515 516 idx = (idx_generator | dir); 517 idx_generator += 8; 518 if (idx == 0) 519 idx = 8; 520 list = net->xfrm.policy_byidx + idx_hash(net, idx); 521 found = 0; 522 hlist_for_each_entry(p, entry, list, byidx) { 523 if (p->index == idx) { 524 found = 1; 525 break; 526 } 527 } 528 if (!found) 529 return idx; 530 } 531 } 532 533 static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2) 534 { 535 u32 *p1 = (u32 *) s1; 536 u32 *p2 = (u32 *) s2; 537 int len = sizeof(struct xfrm_selector) / sizeof(u32); 538 int i; 539 540 for (i = 0; i < len; i++) { 541 if (p1[i] != p2[i]) 542 return 1; 543 } 544 545 return 0; 546 } 547 548 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) 549 { 550 struct net *net = xp_net(policy); 551 struct xfrm_policy *pol; 552 struct xfrm_policy *delpol; 553 struct hlist_head *chain; 554 struct hlist_node *entry, *newpos; 555 u32 mark = policy->mark.v & policy->mark.m; 556 557 write_lock_bh(&xfrm_policy_lock); 558 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir); 559 delpol = NULL; 560 newpos = NULL; 561 hlist_for_each_entry(pol, entry, chain, bydst) { 562 if (pol->type == policy->type && 563 !selector_cmp(&pol->selector, &policy->selector) && 564 (mark & pol->mark.m) == pol->mark.v && 565 xfrm_sec_ctx_match(pol->security, policy->security) && 566 !WARN_ON(delpol)) { 567 if (excl) { 568 write_unlock_bh(&xfrm_policy_lock); 569 return -EEXIST; 570 } 571 delpol = pol; 572 if (policy->priority > pol->priority) 573 continue; 574 } else if (policy->priority >= pol->priority) { 575 newpos = &pol->bydst; 576 continue; 577 } 578 if (delpol) 579 break; 580 } 581 if (newpos) 582 hlist_add_after(newpos, &policy->bydst); 583 else 584 hlist_add_head(&policy->bydst, chain); 585 xfrm_pol_hold(policy); 586 net->xfrm.policy_count[dir]++; 587 atomic_inc(&flow_cache_genid); 588 if (delpol) 589 __xfrm_policy_unlink(delpol, dir); 590 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir); 591 hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index)); 592 policy->curlft.add_time = get_seconds(); 593 policy->curlft.use_time = 0; 594 if (!mod_timer(&policy->timer, jiffies + HZ)) 595 xfrm_pol_hold(policy); 596 list_add(&policy->walk.all, &net->xfrm.policy_all); 597 write_unlock_bh(&xfrm_policy_lock); 598 599 if (delpol) 600 xfrm_policy_kill(delpol); 601 else if (xfrm_bydst_should_resize(net, dir, NULL)) 602 schedule_work(&net->xfrm.policy_hash_work); 603 604 return 0; 605 } 606 EXPORT_SYMBOL(xfrm_policy_insert); 607 608 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type, 609 int dir, struct xfrm_selector *sel, 610 struct xfrm_sec_ctx *ctx, int delete, 611 int *err) 612 { 613 struct xfrm_policy *pol, *ret; 614 struct hlist_head *chain; 615 struct hlist_node *entry; 616 617 *err = 0; 618 write_lock_bh(&xfrm_policy_lock); 619 chain = policy_hash_bysel(net, sel, sel->family, dir); 620 ret = NULL; 621 hlist_for_each_entry(pol, entry, chain, bydst) { 622 if (pol->type == type && 623 (mark & pol->mark.m) == pol->mark.v && 624 !selector_cmp(sel, &pol->selector) && 625 xfrm_sec_ctx_match(ctx, pol->security)) { 626 xfrm_pol_hold(pol); 627 if (delete) { 628 *err = security_xfrm_policy_delete( 629 pol->security); 630 if (*err) { 631 write_unlock_bh(&xfrm_policy_lock); 632 return pol; 633 } 634 __xfrm_policy_unlink(pol, dir); 635 } 636 ret = pol; 637 break; 638 } 639 } 640 write_unlock_bh(&xfrm_policy_lock); 641 642 if (ret && delete) 643 xfrm_policy_kill(ret); 644 return ret; 645 } 646 EXPORT_SYMBOL(xfrm_policy_bysel_ctx); 647 648 struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type, 649 int dir, u32 id, int delete, int *err) 650 { 651 struct xfrm_policy *pol, *ret; 652 struct hlist_head *chain; 653 struct hlist_node *entry; 654 655 *err = -ENOENT; 656 if (xfrm_policy_id2dir(id) != dir) 657 return NULL; 658 659 *err = 0; 660 write_lock_bh(&xfrm_policy_lock); 661 chain = net->xfrm.policy_byidx + idx_hash(net, id); 662 ret = NULL; 663 hlist_for_each_entry(pol, entry, chain, byidx) { 664 if (pol->type == type && pol->index == id && 665 (mark & pol->mark.m) == pol->mark.v) { 666 xfrm_pol_hold(pol); 667 if (delete) { 668 *err = security_xfrm_policy_delete( 669 pol->security); 670 if (*err) { 671 write_unlock_bh(&xfrm_policy_lock); 672 return pol; 673 } 674 __xfrm_policy_unlink(pol, dir); 675 } 676 ret = pol; 677 break; 678 } 679 } 680 write_unlock_bh(&xfrm_policy_lock); 681 682 if (ret && delete) 683 xfrm_policy_kill(ret); 684 return ret; 685 } 686 EXPORT_SYMBOL(xfrm_policy_byid); 687 688 #ifdef CONFIG_SECURITY_NETWORK_XFRM 689 static inline int 690 xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info) 691 { 692 int dir, err = 0; 693 694 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { 695 struct xfrm_policy *pol; 696 struct hlist_node *entry; 697 int i; 698 699 hlist_for_each_entry(pol, entry, 700 &net->xfrm.policy_inexact[dir], bydst) { 701 if (pol->type != type) 702 continue; 703 err = security_xfrm_policy_delete(pol->security); 704 if (err) { 705 xfrm_audit_policy_delete(pol, 0, 706 audit_info->loginuid, 707 audit_info->sessionid, 708 audit_info->secid); 709 return err; 710 } 711 } 712 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) { 713 hlist_for_each_entry(pol, entry, 714 net->xfrm.policy_bydst[dir].table + i, 715 bydst) { 716 if (pol->type != type) 717 continue; 718 err = security_xfrm_policy_delete( 719 pol->security); 720 if (err) { 721 xfrm_audit_policy_delete(pol, 0, 722 audit_info->loginuid, 723 audit_info->sessionid, 724 audit_info->secid); 725 return err; 726 } 727 } 728 } 729 } 730 return err; 731 } 732 #else 733 static inline int 734 xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info) 735 { 736 return 0; 737 } 738 #endif 739 740 int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) 741 { 742 int dir, err = 0, cnt = 0; 743 744 write_lock_bh(&xfrm_policy_lock); 745 746 err = xfrm_policy_flush_secctx_check(net, type, audit_info); 747 if (err) 748 goto out; 749 750 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { 751 struct xfrm_policy *pol; 752 struct hlist_node *entry; 753 int i; 754 755 again1: 756 hlist_for_each_entry(pol, entry, 757 &net->xfrm.policy_inexact[dir], bydst) { 758 if (pol->type != type) 759 continue; 760 __xfrm_policy_unlink(pol, dir); 761 write_unlock_bh(&xfrm_policy_lock); 762 cnt++; 763 764 xfrm_audit_policy_delete(pol, 1, audit_info->loginuid, 765 audit_info->sessionid, 766 audit_info->secid); 767 768 xfrm_policy_kill(pol); 769 770 write_lock_bh(&xfrm_policy_lock); 771 goto again1; 772 } 773 774 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) { 775 again2: 776 hlist_for_each_entry(pol, entry, 777 net->xfrm.policy_bydst[dir].table + i, 778 bydst) { 779 if (pol->type != type) 780 continue; 781 __xfrm_policy_unlink(pol, dir); 782 write_unlock_bh(&xfrm_policy_lock); 783 cnt++; 784 785 xfrm_audit_policy_delete(pol, 1, 786 audit_info->loginuid, 787 audit_info->sessionid, 788 audit_info->secid); 789 xfrm_policy_kill(pol); 790 791 write_lock_bh(&xfrm_policy_lock); 792 goto again2; 793 } 794 } 795 796 } 797 if (!cnt) 798 err = -ESRCH; 799 out: 800 write_unlock_bh(&xfrm_policy_lock); 801 return err; 802 } 803 EXPORT_SYMBOL(xfrm_policy_flush); 804 805 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk, 806 int (*func)(struct xfrm_policy *, int, int, void*), 807 void *data) 808 { 809 struct xfrm_policy *pol; 810 struct xfrm_policy_walk_entry *x; 811 int error = 0; 812 813 if (walk->type >= XFRM_POLICY_TYPE_MAX && 814 walk->type != XFRM_POLICY_TYPE_ANY) 815 return -EINVAL; 816 817 if (list_empty(&walk->walk.all) && walk->seq != 0) 818 return 0; 819 820 write_lock_bh(&xfrm_policy_lock); 821 if (list_empty(&walk->walk.all)) 822 x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all); 823 else 824 x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all); 825 list_for_each_entry_from(x, &net->xfrm.policy_all, all) { 826 if (x->dead) 827 continue; 828 pol = container_of(x, struct xfrm_policy, walk); 829 if (walk->type != XFRM_POLICY_TYPE_ANY && 830 walk->type != pol->type) 831 continue; 832 error = func(pol, xfrm_policy_id2dir(pol->index), 833 walk->seq, data); 834 if (error) { 835 list_move_tail(&walk->walk.all, &x->all); 836 goto out; 837 } 838 walk->seq++; 839 } 840 if (walk->seq == 0) { 841 error = -ENOENT; 842 goto out; 843 } 844 list_del_init(&walk->walk.all); 845 out: 846 write_unlock_bh(&xfrm_policy_lock); 847 return error; 848 } 849 EXPORT_SYMBOL(xfrm_policy_walk); 850 851 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type) 852 { 853 INIT_LIST_HEAD(&walk->walk.all); 854 walk->walk.dead = 1; 855 walk->type = type; 856 walk->seq = 0; 857 } 858 EXPORT_SYMBOL(xfrm_policy_walk_init); 859 860 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk) 861 { 862 if (list_empty(&walk->walk.all)) 863 return; 864 865 write_lock_bh(&xfrm_policy_lock); 866 list_del(&walk->walk.all); 867 write_unlock_bh(&xfrm_policy_lock); 868 } 869 EXPORT_SYMBOL(xfrm_policy_walk_done); 870 871 /* 872 * Find policy to apply to this flow. 873 * 874 * Returns 0 if policy found, else an -errno. 875 */ 876 static int xfrm_policy_match(const struct xfrm_policy *pol, 877 const struct flowi *fl, 878 u8 type, u16 family, int dir) 879 { 880 const struct xfrm_selector *sel = &pol->selector; 881 int ret = -ESRCH; 882 bool match; 883 884 if (pol->family != family || 885 (fl->flowi_mark & pol->mark.m) != pol->mark.v || 886 pol->type != type) 887 return ret; 888 889 match = xfrm_selector_match(sel, fl, family); 890 if (match) 891 ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid, 892 dir); 893 894 return ret; 895 } 896 897 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type, 898 const struct flowi *fl, 899 u16 family, u8 dir) 900 { 901 int err; 902 struct xfrm_policy *pol, *ret; 903 const xfrm_address_t *daddr, *saddr; 904 struct hlist_node *entry; 905 struct hlist_head *chain; 906 u32 priority = ~0U; 907 908 daddr = xfrm_flowi_daddr(fl, family); 909 saddr = xfrm_flowi_saddr(fl, family); 910 if (unlikely(!daddr || !saddr)) 911 return NULL; 912 913 read_lock_bh(&xfrm_policy_lock); 914 chain = policy_hash_direct(net, daddr, saddr, family, dir); 915 ret = NULL; 916 hlist_for_each_entry(pol, entry, chain, bydst) { 917 err = xfrm_policy_match(pol, fl, type, family, dir); 918 if (err) { 919 if (err == -ESRCH) 920 continue; 921 else { 922 ret = ERR_PTR(err); 923 goto fail; 924 } 925 } else { 926 ret = pol; 927 priority = ret->priority; 928 break; 929 } 930 } 931 chain = &net->xfrm.policy_inexact[dir]; 932 hlist_for_each_entry(pol, entry, chain, bydst) { 933 err = xfrm_policy_match(pol, fl, type, family, dir); 934 if (err) { 935 if (err == -ESRCH) 936 continue; 937 else { 938 ret = ERR_PTR(err); 939 goto fail; 940 } 941 } else if (pol->priority < priority) { 942 ret = pol; 943 break; 944 } 945 } 946 if (ret) 947 xfrm_pol_hold(ret); 948 fail: 949 read_unlock_bh(&xfrm_policy_lock); 950 951 return ret; 952 } 953 954 static struct xfrm_policy * 955 __xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir) 956 { 957 #ifdef CONFIG_XFRM_SUB_POLICY 958 struct xfrm_policy *pol; 959 960 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir); 961 if (pol != NULL) 962 return pol; 963 #endif 964 return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir); 965 } 966 967 static struct flow_cache_object * 968 xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, 969 u8 dir, struct flow_cache_object *old_obj, void *ctx) 970 { 971 struct xfrm_policy *pol; 972 973 if (old_obj) 974 xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo)); 975 976 pol = __xfrm_policy_lookup(net, fl, family, dir); 977 if (IS_ERR_OR_NULL(pol)) 978 return ERR_CAST(pol); 979 980 /* Resolver returns two references: 981 * one for cache and one for caller of flow_cache_lookup() */ 982 xfrm_pol_hold(pol); 983 984 return &pol->flo; 985 } 986 987 static inline int policy_to_flow_dir(int dir) 988 { 989 if (XFRM_POLICY_IN == FLOW_DIR_IN && 990 XFRM_POLICY_OUT == FLOW_DIR_OUT && 991 XFRM_POLICY_FWD == FLOW_DIR_FWD) 992 return dir; 993 switch (dir) { 994 default: 995 case XFRM_POLICY_IN: 996 return FLOW_DIR_IN; 997 case XFRM_POLICY_OUT: 998 return FLOW_DIR_OUT; 999 case XFRM_POLICY_FWD: 1000 return FLOW_DIR_FWD; 1001 } 1002 } 1003 1004 static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, 1005 const struct flowi *fl) 1006 { 1007 struct xfrm_policy *pol; 1008 1009 read_lock_bh(&xfrm_policy_lock); 1010 if ((pol = sk->sk_policy[dir]) != NULL) { 1011 bool match = xfrm_selector_match(&pol->selector, fl, 1012 sk->sk_family); 1013 int err = 0; 1014 1015 if (match) { 1016 if ((sk->sk_mark & pol->mark.m) != pol->mark.v) { 1017 pol = NULL; 1018 goto out; 1019 } 1020 err = security_xfrm_policy_lookup(pol->security, 1021 fl->flowi_secid, 1022 policy_to_flow_dir(dir)); 1023 if (!err) 1024 xfrm_pol_hold(pol); 1025 else if (err == -ESRCH) 1026 pol = NULL; 1027 else 1028 pol = ERR_PTR(err); 1029 } else 1030 pol = NULL; 1031 } 1032 out: 1033 read_unlock_bh(&xfrm_policy_lock); 1034 return pol; 1035 } 1036 1037 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir) 1038 { 1039 struct net *net = xp_net(pol); 1040 struct hlist_head *chain = policy_hash_bysel(net, &pol->selector, 1041 pol->family, dir); 1042 1043 list_add(&pol->walk.all, &net->xfrm.policy_all); 1044 hlist_add_head(&pol->bydst, chain); 1045 hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index)); 1046 net->xfrm.policy_count[dir]++; 1047 xfrm_pol_hold(pol); 1048 1049 if (xfrm_bydst_should_resize(net, dir, NULL)) 1050 schedule_work(&net->xfrm.policy_hash_work); 1051 } 1052 1053 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, 1054 int dir) 1055 { 1056 struct net *net = xp_net(pol); 1057 1058 if (hlist_unhashed(&pol->bydst)) 1059 return NULL; 1060 1061 hlist_del(&pol->bydst); 1062 hlist_del(&pol->byidx); 1063 list_del(&pol->walk.all); 1064 net->xfrm.policy_count[dir]--; 1065 1066 return pol; 1067 } 1068 1069 int xfrm_policy_delete(struct xfrm_policy *pol, int dir) 1070 { 1071 write_lock_bh(&xfrm_policy_lock); 1072 pol = __xfrm_policy_unlink(pol, dir); 1073 write_unlock_bh(&xfrm_policy_lock); 1074 if (pol) { 1075 xfrm_policy_kill(pol); 1076 return 0; 1077 } 1078 return -ENOENT; 1079 } 1080 EXPORT_SYMBOL(xfrm_policy_delete); 1081 1082 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol) 1083 { 1084 struct net *net = xp_net(pol); 1085 struct xfrm_policy *old_pol; 1086 1087 #ifdef CONFIG_XFRM_SUB_POLICY 1088 if (pol && pol->type != XFRM_POLICY_TYPE_MAIN) 1089 return -EINVAL; 1090 #endif 1091 1092 write_lock_bh(&xfrm_policy_lock); 1093 old_pol = sk->sk_policy[dir]; 1094 sk->sk_policy[dir] = pol; 1095 if (pol) { 1096 pol->curlft.add_time = get_seconds(); 1097 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir); 1098 __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir); 1099 } 1100 if (old_pol) 1101 /* Unlinking succeeds always. This is the only function 1102 * allowed to delete or replace socket policy. 1103 */ 1104 __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir); 1105 write_unlock_bh(&xfrm_policy_lock); 1106 1107 if (old_pol) { 1108 xfrm_policy_kill(old_pol); 1109 } 1110 return 0; 1111 } 1112 1113 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir) 1114 { 1115 struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC); 1116 1117 if (newp) { 1118 newp->selector = old->selector; 1119 if (security_xfrm_policy_clone(old->security, 1120 &newp->security)) { 1121 kfree(newp); 1122 return NULL; /* ENOMEM */ 1123 } 1124 newp->lft = old->lft; 1125 newp->curlft = old->curlft; 1126 newp->mark = old->mark; 1127 newp->action = old->action; 1128 newp->flags = old->flags; 1129 newp->xfrm_nr = old->xfrm_nr; 1130 newp->index = old->index; 1131 newp->type = old->type; 1132 memcpy(newp->xfrm_vec, old->xfrm_vec, 1133 newp->xfrm_nr*sizeof(struct xfrm_tmpl)); 1134 write_lock_bh(&xfrm_policy_lock); 1135 __xfrm_policy_link(newp, XFRM_POLICY_MAX+dir); 1136 write_unlock_bh(&xfrm_policy_lock); 1137 xfrm_pol_put(newp); 1138 } 1139 return newp; 1140 } 1141 1142 int __xfrm_sk_clone_policy(struct sock *sk) 1143 { 1144 struct xfrm_policy *p0 = sk->sk_policy[0], 1145 *p1 = sk->sk_policy[1]; 1146 1147 sk->sk_policy[0] = sk->sk_policy[1] = NULL; 1148 if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL) 1149 return -ENOMEM; 1150 if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL) 1151 return -ENOMEM; 1152 return 0; 1153 } 1154 1155 static int 1156 xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote, 1157 unsigned short family) 1158 { 1159 int err; 1160 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1161 1162 if (unlikely(afinfo == NULL)) 1163 return -EINVAL; 1164 err = afinfo->get_saddr(net, local, remote); 1165 xfrm_policy_put_afinfo(afinfo); 1166 return err; 1167 } 1168 1169 /* Resolve list of templates for the flow, given policy. */ 1170 1171 static int 1172 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl, 1173 struct xfrm_state **xfrm, unsigned short family) 1174 { 1175 struct net *net = xp_net(policy); 1176 int nx; 1177 int i, error; 1178 xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family); 1179 xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family); 1180 xfrm_address_t tmp; 1181 1182 for (nx=0, i = 0; i < policy->xfrm_nr; i++) { 1183 struct xfrm_state *x; 1184 xfrm_address_t *remote = daddr; 1185 xfrm_address_t *local = saddr; 1186 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i]; 1187 1188 if (tmpl->mode == XFRM_MODE_TUNNEL || 1189 tmpl->mode == XFRM_MODE_BEET) { 1190 remote = &tmpl->id.daddr; 1191 local = &tmpl->saddr; 1192 if (xfrm_addr_any(local, tmpl->encap_family)) { 1193 error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family); 1194 if (error) 1195 goto fail; 1196 local = &tmp; 1197 } 1198 } 1199 1200 x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family); 1201 1202 if (x && x->km.state == XFRM_STATE_VALID) { 1203 xfrm[nx++] = x; 1204 daddr = remote; 1205 saddr = local; 1206 continue; 1207 } 1208 if (x) { 1209 error = (x->km.state == XFRM_STATE_ERROR ? 1210 -EINVAL : -EAGAIN); 1211 xfrm_state_put(x); 1212 } 1213 else if (error == -ESRCH) 1214 error = -EAGAIN; 1215 1216 if (!tmpl->optional) 1217 goto fail; 1218 } 1219 return nx; 1220 1221 fail: 1222 for (nx--; nx>=0; nx--) 1223 xfrm_state_put(xfrm[nx]); 1224 return error; 1225 } 1226 1227 static int 1228 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl, 1229 struct xfrm_state **xfrm, unsigned short family) 1230 { 1231 struct xfrm_state *tp[XFRM_MAX_DEPTH]; 1232 struct xfrm_state **tpp = (npols > 1) ? tp : xfrm; 1233 int cnx = 0; 1234 int error; 1235 int ret; 1236 int i; 1237 1238 for (i = 0; i < npols; i++) { 1239 if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) { 1240 error = -ENOBUFS; 1241 goto fail; 1242 } 1243 1244 ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family); 1245 if (ret < 0) { 1246 error = ret; 1247 goto fail; 1248 } else 1249 cnx += ret; 1250 } 1251 1252 /* found states are sorted for outbound processing */ 1253 if (npols > 1) 1254 xfrm_state_sort(xfrm, tpp, cnx, family); 1255 1256 return cnx; 1257 1258 fail: 1259 for (cnx--; cnx>=0; cnx--) 1260 xfrm_state_put(tpp[cnx]); 1261 return error; 1262 1263 } 1264 1265 /* Check that the bundle accepts the flow and its components are 1266 * still valid. 1267 */ 1268 1269 static inline int xfrm_get_tos(const struct flowi *fl, int family) 1270 { 1271 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1272 int tos; 1273 1274 if (!afinfo) 1275 return -EINVAL; 1276 1277 tos = afinfo->get_tos(fl); 1278 1279 xfrm_policy_put_afinfo(afinfo); 1280 1281 return tos; 1282 } 1283 1284 static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo) 1285 { 1286 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo); 1287 struct dst_entry *dst = &xdst->u.dst; 1288 1289 if (xdst->route == NULL) { 1290 /* Dummy bundle - if it has xfrms we were not 1291 * able to build bundle as template resolution failed. 1292 * It means we need to try again resolving. */ 1293 if (xdst->num_xfrms > 0) 1294 return NULL; 1295 } else { 1296 /* Real bundle */ 1297 if (stale_bundle(dst)) 1298 return NULL; 1299 } 1300 1301 dst_hold(dst); 1302 return flo; 1303 } 1304 1305 static int xfrm_bundle_flo_check(struct flow_cache_object *flo) 1306 { 1307 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo); 1308 struct dst_entry *dst = &xdst->u.dst; 1309 1310 if (!xdst->route) 1311 return 0; 1312 if (stale_bundle(dst)) 1313 return 0; 1314 1315 return 1; 1316 } 1317 1318 static void xfrm_bundle_flo_delete(struct flow_cache_object *flo) 1319 { 1320 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo); 1321 struct dst_entry *dst = &xdst->u.dst; 1322 1323 dst_free(dst); 1324 } 1325 1326 static const struct flow_cache_ops xfrm_bundle_fc_ops = { 1327 .get = xfrm_bundle_flo_get, 1328 .check = xfrm_bundle_flo_check, 1329 .delete = xfrm_bundle_flo_delete, 1330 }; 1331 1332 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) 1333 { 1334 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1335 struct dst_ops *dst_ops; 1336 struct xfrm_dst *xdst; 1337 1338 if (!afinfo) 1339 return ERR_PTR(-EINVAL); 1340 1341 switch (family) { 1342 case AF_INET: 1343 dst_ops = &net->xfrm.xfrm4_dst_ops; 1344 break; 1345 #if IS_ENABLED(CONFIG_IPV6) 1346 case AF_INET6: 1347 dst_ops = &net->xfrm.xfrm6_dst_ops; 1348 break; 1349 #endif 1350 default: 1351 BUG(); 1352 } 1353 xdst = dst_alloc(dst_ops, NULL, 0, DST_OBSOLETE_NONE, 0); 1354 1355 if (likely(xdst)) { 1356 struct dst_entry *dst = &xdst->u.dst; 1357 1358 memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst)); 1359 xdst->flo.ops = &xfrm_bundle_fc_ops; 1360 if (afinfo->init_dst) 1361 afinfo->init_dst(net, xdst); 1362 } else 1363 xdst = ERR_PTR(-ENOBUFS); 1364 1365 xfrm_policy_put_afinfo(afinfo); 1366 1367 return xdst; 1368 } 1369 1370 static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst, 1371 int nfheader_len) 1372 { 1373 struct xfrm_policy_afinfo *afinfo = 1374 xfrm_policy_get_afinfo(dst->ops->family); 1375 int err; 1376 1377 if (!afinfo) 1378 return -EINVAL; 1379 1380 err = afinfo->init_path(path, dst, nfheader_len); 1381 1382 xfrm_policy_put_afinfo(afinfo); 1383 1384 return err; 1385 } 1386 1387 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, 1388 const struct flowi *fl) 1389 { 1390 struct xfrm_policy_afinfo *afinfo = 1391 xfrm_policy_get_afinfo(xdst->u.dst.ops->family); 1392 int err; 1393 1394 if (!afinfo) 1395 return -EINVAL; 1396 1397 err = afinfo->fill_dst(xdst, dev, fl); 1398 1399 xfrm_policy_put_afinfo(afinfo); 1400 1401 return err; 1402 } 1403 1404 1405 /* Allocate chain of dst_entry's, attach known xfrm's, calculate 1406 * all the metrics... Shortly, bundle a bundle. 1407 */ 1408 1409 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, 1410 struct xfrm_state **xfrm, int nx, 1411 const struct flowi *fl, 1412 struct dst_entry *dst) 1413 { 1414 struct net *net = xp_net(policy); 1415 unsigned long now = jiffies; 1416 struct net_device *dev; 1417 struct xfrm_mode *inner_mode; 1418 struct dst_entry *dst_prev = NULL; 1419 struct dst_entry *dst0 = NULL; 1420 int i = 0; 1421 int err; 1422 int header_len = 0; 1423 int nfheader_len = 0; 1424 int trailer_len = 0; 1425 int tos; 1426 int family = policy->selector.family; 1427 xfrm_address_t saddr, daddr; 1428 1429 xfrm_flowi_addr_get(fl, &saddr, &daddr, family); 1430 1431 tos = xfrm_get_tos(fl, family); 1432 err = tos; 1433 if (tos < 0) 1434 goto put_states; 1435 1436 dst_hold(dst); 1437 1438 for (; i < nx; i++) { 1439 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family); 1440 struct dst_entry *dst1 = &xdst->u.dst; 1441 1442 err = PTR_ERR(xdst); 1443 if (IS_ERR(xdst)) { 1444 dst_release(dst); 1445 goto put_states; 1446 } 1447 1448 if (xfrm[i]->sel.family == AF_UNSPEC) { 1449 inner_mode = xfrm_ip2inner_mode(xfrm[i], 1450 xfrm_af2proto(family)); 1451 if (!inner_mode) { 1452 err = -EAFNOSUPPORT; 1453 dst_release(dst); 1454 goto put_states; 1455 } 1456 } else 1457 inner_mode = xfrm[i]->inner_mode; 1458 1459 if (!dst_prev) 1460 dst0 = dst1; 1461 else { 1462 dst_prev->child = dst_clone(dst1); 1463 dst1->flags |= DST_NOHASH; 1464 } 1465 1466 xdst->route = dst; 1467 dst_copy_metrics(dst1, dst); 1468 1469 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) { 1470 family = xfrm[i]->props.family; 1471 dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr, 1472 family); 1473 err = PTR_ERR(dst); 1474 if (IS_ERR(dst)) 1475 goto put_states; 1476 } else 1477 dst_hold(dst); 1478 1479 dst1->xfrm = xfrm[i]; 1480 xdst->xfrm_genid = xfrm[i]->genid; 1481 1482 dst1->obsolete = DST_OBSOLETE_FORCE_CHK; 1483 dst1->flags |= DST_HOST; 1484 dst1->lastuse = now; 1485 1486 dst1->input = dst_discard; 1487 dst1->output = inner_mode->afinfo->output; 1488 1489 dst1->next = dst_prev; 1490 dst_prev = dst1; 1491 1492 header_len += xfrm[i]->props.header_len; 1493 if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT) 1494 nfheader_len += xfrm[i]->props.header_len; 1495 trailer_len += xfrm[i]->props.trailer_len; 1496 } 1497 1498 dst_prev->child = dst; 1499 dst0->path = dst; 1500 1501 err = -ENODEV; 1502 dev = dst->dev; 1503 if (!dev) 1504 goto free_dst; 1505 1506 xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len); 1507 xfrm_init_pmtu(dst_prev); 1508 1509 for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) { 1510 struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev; 1511 1512 err = xfrm_fill_dst(xdst, dev, fl); 1513 if (err) 1514 goto free_dst; 1515 1516 dst_prev->header_len = header_len; 1517 dst_prev->trailer_len = trailer_len; 1518 header_len -= xdst->u.dst.xfrm->props.header_len; 1519 trailer_len -= xdst->u.dst.xfrm->props.trailer_len; 1520 } 1521 1522 out: 1523 return dst0; 1524 1525 put_states: 1526 for (; i < nx; i++) 1527 xfrm_state_put(xfrm[i]); 1528 free_dst: 1529 if (dst0) 1530 dst_free(dst0); 1531 dst0 = ERR_PTR(err); 1532 goto out; 1533 } 1534 1535 static int inline 1536 xfrm_dst_alloc_copy(void **target, const void *src, int size) 1537 { 1538 if (!*target) { 1539 *target = kmalloc(size, GFP_ATOMIC); 1540 if (!*target) 1541 return -ENOMEM; 1542 } 1543 memcpy(*target, src, size); 1544 return 0; 1545 } 1546 1547 static int inline 1548 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel) 1549 { 1550 #ifdef CONFIG_XFRM_SUB_POLICY 1551 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 1552 return xfrm_dst_alloc_copy((void **)&(xdst->partner), 1553 sel, sizeof(*sel)); 1554 #else 1555 return 0; 1556 #endif 1557 } 1558 1559 static int inline 1560 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl) 1561 { 1562 #ifdef CONFIG_XFRM_SUB_POLICY 1563 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 1564 return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl)); 1565 #else 1566 return 0; 1567 #endif 1568 } 1569 1570 static int xfrm_expand_policies(const struct flowi *fl, u16 family, 1571 struct xfrm_policy **pols, 1572 int *num_pols, int *num_xfrms) 1573 { 1574 int i; 1575 1576 if (*num_pols == 0 || !pols[0]) { 1577 *num_pols = 0; 1578 *num_xfrms = 0; 1579 return 0; 1580 } 1581 if (IS_ERR(pols[0])) 1582 return PTR_ERR(pols[0]); 1583 1584 *num_xfrms = pols[0]->xfrm_nr; 1585 1586 #ifdef CONFIG_XFRM_SUB_POLICY 1587 if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW && 1588 pols[0]->type != XFRM_POLICY_TYPE_MAIN) { 1589 pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]), 1590 XFRM_POLICY_TYPE_MAIN, 1591 fl, family, 1592 XFRM_POLICY_OUT); 1593 if (pols[1]) { 1594 if (IS_ERR(pols[1])) { 1595 xfrm_pols_put(pols, *num_pols); 1596 return PTR_ERR(pols[1]); 1597 } 1598 (*num_pols) ++; 1599 (*num_xfrms) += pols[1]->xfrm_nr; 1600 } 1601 } 1602 #endif 1603 for (i = 0; i < *num_pols; i++) { 1604 if (pols[i]->action != XFRM_POLICY_ALLOW) { 1605 *num_xfrms = -1; 1606 break; 1607 } 1608 } 1609 1610 return 0; 1611 1612 } 1613 1614 static struct xfrm_dst * 1615 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, 1616 const struct flowi *fl, u16 family, 1617 struct dst_entry *dst_orig) 1618 { 1619 struct net *net = xp_net(pols[0]); 1620 struct xfrm_state *xfrm[XFRM_MAX_DEPTH]; 1621 struct dst_entry *dst; 1622 struct xfrm_dst *xdst; 1623 int err; 1624 1625 /* Try to instantiate a bundle */ 1626 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family); 1627 if (err <= 0) { 1628 if (err != 0 && err != -EAGAIN) 1629 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); 1630 return ERR_PTR(err); 1631 } 1632 1633 dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig); 1634 if (IS_ERR(dst)) { 1635 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR); 1636 return ERR_CAST(dst); 1637 } 1638 1639 xdst = (struct xfrm_dst *)dst; 1640 xdst->num_xfrms = err; 1641 if (num_pols > 1) 1642 err = xfrm_dst_update_parent(dst, &pols[1]->selector); 1643 else 1644 err = xfrm_dst_update_origin(dst, fl); 1645 if (unlikely(err)) { 1646 dst_free(dst); 1647 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR); 1648 return ERR_PTR(err); 1649 } 1650 1651 xdst->num_pols = num_pols; 1652 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols); 1653 xdst->policy_genid = atomic_read(&pols[0]->genid); 1654 1655 return xdst; 1656 } 1657 1658 static struct flow_cache_object * 1659 xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, 1660 struct flow_cache_object *oldflo, void *ctx) 1661 { 1662 struct dst_entry *dst_orig = (struct dst_entry *)ctx; 1663 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 1664 struct xfrm_dst *xdst, *new_xdst; 1665 int num_pols = 0, num_xfrms = 0, i, err, pol_dead; 1666 1667 /* Check if the policies from old bundle are usable */ 1668 xdst = NULL; 1669 if (oldflo) { 1670 xdst = container_of(oldflo, struct xfrm_dst, flo); 1671 num_pols = xdst->num_pols; 1672 num_xfrms = xdst->num_xfrms; 1673 pol_dead = 0; 1674 for (i = 0; i < num_pols; i++) { 1675 pols[i] = xdst->pols[i]; 1676 pol_dead |= pols[i]->walk.dead; 1677 } 1678 if (pol_dead) { 1679 dst_free(&xdst->u.dst); 1680 xdst = NULL; 1681 num_pols = 0; 1682 num_xfrms = 0; 1683 oldflo = NULL; 1684 } 1685 } 1686 1687 /* Resolve policies to use if we couldn't get them from 1688 * previous cache entry */ 1689 if (xdst == NULL) { 1690 num_pols = 1; 1691 pols[0] = __xfrm_policy_lookup(net, fl, family, dir); 1692 err = xfrm_expand_policies(fl, family, pols, 1693 &num_pols, &num_xfrms); 1694 if (err < 0) 1695 goto inc_error; 1696 if (num_pols == 0) 1697 return NULL; 1698 if (num_xfrms <= 0) 1699 goto make_dummy_bundle; 1700 } 1701 1702 new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig); 1703 if (IS_ERR(new_xdst)) { 1704 err = PTR_ERR(new_xdst); 1705 if (err != -EAGAIN) 1706 goto error; 1707 if (oldflo == NULL) 1708 goto make_dummy_bundle; 1709 dst_hold(&xdst->u.dst); 1710 return oldflo; 1711 } else if (new_xdst == NULL) { 1712 num_xfrms = 0; 1713 if (oldflo == NULL) 1714 goto make_dummy_bundle; 1715 xdst->num_xfrms = 0; 1716 dst_hold(&xdst->u.dst); 1717 return oldflo; 1718 } 1719 1720 /* Kill the previous bundle */ 1721 if (xdst) { 1722 /* The policies were stolen for newly generated bundle */ 1723 xdst->num_pols = 0; 1724 dst_free(&xdst->u.dst); 1725 } 1726 1727 /* Flow cache does not have reference, it dst_free()'s, 1728 * but we do need to return one reference for original caller */ 1729 dst_hold(&new_xdst->u.dst); 1730 return &new_xdst->flo; 1731 1732 make_dummy_bundle: 1733 /* We found policies, but there's no bundles to instantiate: 1734 * either because the policy blocks, has no transformations or 1735 * we could not build template (no xfrm_states).*/ 1736 xdst = xfrm_alloc_dst(net, family); 1737 if (IS_ERR(xdst)) { 1738 xfrm_pols_put(pols, num_pols); 1739 return ERR_CAST(xdst); 1740 } 1741 xdst->num_pols = num_pols; 1742 xdst->num_xfrms = num_xfrms; 1743 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols); 1744 1745 dst_hold(&xdst->u.dst); 1746 return &xdst->flo; 1747 1748 inc_error: 1749 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); 1750 error: 1751 if (xdst != NULL) 1752 dst_free(&xdst->u.dst); 1753 else 1754 xfrm_pols_put(pols, num_pols); 1755 return ERR_PTR(err); 1756 } 1757 1758 static struct dst_entry *make_blackhole(struct net *net, u16 family, 1759 struct dst_entry *dst_orig) 1760 { 1761 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1762 struct dst_entry *ret; 1763 1764 if (!afinfo) { 1765 dst_release(dst_orig); 1766 ret = ERR_PTR(-EINVAL); 1767 } else { 1768 ret = afinfo->blackhole_route(net, dst_orig); 1769 } 1770 xfrm_policy_put_afinfo(afinfo); 1771 1772 return ret; 1773 } 1774 1775 /* Main function: finds/creates a bundle for given flow. 1776 * 1777 * At the moment we eat a raw IP route. Mostly to speed up lookups 1778 * on interfaces with disabled IPsec. 1779 */ 1780 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, 1781 const struct flowi *fl, 1782 struct sock *sk, int flags) 1783 { 1784 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 1785 struct flow_cache_object *flo; 1786 struct xfrm_dst *xdst; 1787 struct dst_entry *dst, *route; 1788 u16 family = dst_orig->ops->family; 1789 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT); 1790 int i, err, num_pols, num_xfrms = 0, drop_pols = 0; 1791 1792 restart: 1793 dst = NULL; 1794 xdst = NULL; 1795 route = NULL; 1796 1797 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { 1798 num_pols = 1; 1799 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); 1800 err = xfrm_expand_policies(fl, family, pols, 1801 &num_pols, &num_xfrms); 1802 if (err < 0) 1803 goto dropdst; 1804 1805 if (num_pols) { 1806 if (num_xfrms <= 0) { 1807 drop_pols = num_pols; 1808 goto no_transform; 1809 } 1810 1811 xdst = xfrm_resolve_and_create_bundle( 1812 pols, num_pols, fl, 1813 family, dst_orig); 1814 if (IS_ERR(xdst)) { 1815 xfrm_pols_put(pols, num_pols); 1816 err = PTR_ERR(xdst); 1817 goto dropdst; 1818 } else if (xdst == NULL) { 1819 num_xfrms = 0; 1820 drop_pols = num_pols; 1821 goto no_transform; 1822 } 1823 1824 dst_hold(&xdst->u.dst); 1825 1826 spin_lock_bh(&xfrm_policy_sk_bundle_lock); 1827 xdst->u.dst.next = xfrm_policy_sk_bundles; 1828 xfrm_policy_sk_bundles = &xdst->u.dst; 1829 spin_unlock_bh(&xfrm_policy_sk_bundle_lock); 1830 1831 route = xdst->route; 1832 } 1833 } 1834 1835 if (xdst == NULL) { 1836 /* To accelerate a bit... */ 1837 if ((dst_orig->flags & DST_NOXFRM) || 1838 !net->xfrm.policy_count[XFRM_POLICY_OUT]) 1839 goto nopol; 1840 1841 flo = flow_cache_lookup(net, fl, family, dir, 1842 xfrm_bundle_lookup, dst_orig); 1843 if (flo == NULL) 1844 goto nopol; 1845 if (IS_ERR(flo)) { 1846 err = PTR_ERR(flo); 1847 goto dropdst; 1848 } 1849 xdst = container_of(flo, struct xfrm_dst, flo); 1850 1851 num_pols = xdst->num_pols; 1852 num_xfrms = xdst->num_xfrms; 1853 memcpy(pols, xdst->pols, sizeof(struct xfrm_policy*) * num_pols); 1854 route = xdst->route; 1855 } 1856 1857 dst = &xdst->u.dst; 1858 if (route == NULL && num_xfrms > 0) { 1859 /* The only case when xfrm_bundle_lookup() returns a 1860 * bundle with null route, is when the template could 1861 * not be resolved. It means policies are there, but 1862 * bundle could not be created, since we don't yet 1863 * have the xfrm_state's. We need to wait for KM to 1864 * negotiate new SA's or bail out with error.*/ 1865 if (net->xfrm.sysctl_larval_drop) { 1866 /* EREMOTE tells the caller to generate 1867 * a one-shot blackhole route. */ 1868 dst_release(dst); 1869 xfrm_pols_put(pols, drop_pols); 1870 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); 1871 1872 return make_blackhole(net, family, dst_orig); 1873 } 1874 if (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP) { 1875 DECLARE_WAITQUEUE(wait, current); 1876 1877 add_wait_queue(&net->xfrm.km_waitq, &wait); 1878 set_current_state(TASK_INTERRUPTIBLE); 1879 schedule(); 1880 set_current_state(TASK_RUNNING); 1881 remove_wait_queue(&net->xfrm.km_waitq, &wait); 1882 1883 if (!signal_pending(current)) { 1884 dst_release(dst); 1885 goto restart; 1886 } 1887 1888 err = -ERESTART; 1889 } else 1890 err = -EAGAIN; 1891 1892 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); 1893 goto error; 1894 } 1895 1896 no_transform: 1897 if (num_pols == 0) 1898 goto nopol; 1899 1900 if ((flags & XFRM_LOOKUP_ICMP) && 1901 !(pols[0]->flags & XFRM_POLICY_ICMP)) { 1902 err = -ENOENT; 1903 goto error; 1904 } 1905 1906 for (i = 0; i < num_pols; i++) 1907 pols[i]->curlft.use_time = get_seconds(); 1908 1909 if (num_xfrms < 0) { 1910 /* Prohibit the flow */ 1911 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK); 1912 err = -EPERM; 1913 goto error; 1914 } else if (num_xfrms > 0) { 1915 /* Flow transformed */ 1916 dst_release(dst_orig); 1917 } else { 1918 /* Flow passes untransformed */ 1919 dst_release(dst); 1920 dst = dst_orig; 1921 } 1922 ok: 1923 xfrm_pols_put(pols, drop_pols); 1924 if (dst && dst->xfrm && 1925 dst->xfrm->props.mode == XFRM_MODE_TUNNEL) 1926 dst->flags |= DST_XFRM_TUNNEL; 1927 return dst; 1928 1929 nopol: 1930 if (!(flags & XFRM_LOOKUP_ICMP)) { 1931 dst = dst_orig; 1932 goto ok; 1933 } 1934 err = -ENOENT; 1935 error: 1936 dst_release(dst); 1937 dropdst: 1938 dst_release(dst_orig); 1939 xfrm_pols_put(pols, drop_pols); 1940 return ERR_PTR(err); 1941 } 1942 EXPORT_SYMBOL(xfrm_lookup); 1943 1944 static inline int 1945 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl) 1946 { 1947 struct xfrm_state *x; 1948 1949 if (!skb->sp || idx < 0 || idx >= skb->sp->len) 1950 return 0; 1951 x = skb->sp->xvec[idx]; 1952 if (!x->type->reject) 1953 return 0; 1954 return x->type->reject(x, skb, fl); 1955 } 1956 1957 /* When skb is transformed back to its "native" form, we have to 1958 * check policy restrictions. At the moment we make this in maximally 1959 * stupid way. Shame on me. :-) Of course, connected sockets must 1960 * have policy cached at them. 1961 */ 1962 1963 static inline int 1964 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, 1965 unsigned short family) 1966 { 1967 if (xfrm_state_kern(x)) 1968 return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family); 1969 return x->id.proto == tmpl->id.proto && 1970 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) && 1971 (x->props.reqid == tmpl->reqid || !tmpl->reqid) && 1972 x->props.mode == tmpl->mode && 1973 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) || 1974 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) && 1975 !(x->props.mode != XFRM_MODE_TRANSPORT && 1976 xfrm_state_addr_cmp(tmpl, x, family)); 1977 } 1978 1979 /* 1980 * 0 or more than 0 is returned when validation is succeeded (either bypass 1981 * because of optional transport mode, or next index of the mathced secpath 1982 * state with the template. 1983 * -1 is returned when no matching template is found. 1984 * Otherwise "-2 - errored_index" is returned. 1985 */ 1986 static inline int 1987 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start, 1988 unsigned short family) 1989 { 1990 int idx = start; 1991 1992 if (tmpl->optional) { 1993 if (tmpl->mode == XFRM_MODE_TRANSPORT) 1994 return start; 1995 } else 1996 start = -1; 1997 for (; idx < sp->len; idx++) { 1998 if (xfrm_state_ok(tmpl, sp->xvec[idx], family)) 1999 return ++idx; 2000 if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) { 2001 if (start == -1) 2002 start = -2-idx; 2003 break; 2004 } 2005 } 2006 return start; 2007 } 2008 2009 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, 2010 unsigned int family, int reverse) 2011 { 2012 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 2013 int err; 2014 2015 if (unlikely(afinfo == NULL)) 2016 return -EAFNOSUPPORT; 2017 2018 afinfo->decode_session(skb, fl, reverse); 2019 err = security_xfrm_decode_session(skb, &fl->flowi_secid); 2020 xfrm_policy_put_afinfo(afinfo); 2021 return err; 2022 } 2023 EXPORT_SYMBOL(__xfrm_decode_session); 2024 2025 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp) 2026 { 2027 for (; k < sp->len; k++) { 2028 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) { 2029 *idxp = k; 2030 return 1; 2031 } 2032 } 2033 2034 return 0; 2035 } 2036 2037 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, 2038 unsigned short family) 2039 { 2040 struct net *net = dev_net(skb->dev); 2041 struct xfrm_policy *pol; 2042 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 2043 int npols = 0; 2044 int xfrm_nr; 2045 int pi; 2046 int reverse; 2047 struct flowi fl; 2048 u8 fl_dir; 2049 int xerr_idx = -1; 2050 2051 reverse = dir & ~XFRM_POLICY_MASK; 2052 dir &= XFRM_POLICY_MASK; 2053 fl_dir = policy_to_flow_dir(dir); 2054 2055 if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) { 2056 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR); 2057 return 0; 2058 } 2059 2060 nf_nat_decode_session(skb, &fl, family); 2061 2062 /* First, check used SA against their selectors. */ 2063 if (skb->sp) { 2064 int i; 2065 2066 for (i=skb->sp->len-1; i>=0; i--) { 2067 struct xfrm_state *x = skb->sp->xvec[i]; 2068 if (!xfrm_selector_match(&x->sel, &fl, family)) { 2069 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH); 2070 return 0; 2071 } 2072 } 2073 } 2074 2075 pol = NULL; 2076 if (sk && sk->sk_policy[dir]) { 2077 pol = xfrm_sk_policy_lookup(sk, dir, &fl); 2078 if (IS_ERR(pol)) { 2079 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 2080 return 0; 2081 } 2082 } 2083 2084 if (!pol) { 2085 struct flow_cache_object *flo; 2086 2087 flo = flow_cache_lookup(net, &fl, family, fl_dir, 2088 xfrm_policy_lookup, NULL); 2089 if (IS_ERR_OR_NULL(flo)) 2090 pol = ERR_CAST(flo); 2091 else 2092 pol = container_of(flo, struct xfrm_policy, flo); 2093 } 2094 2095 if (IS_ERR(pol)) { 2096 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 2097 return 0; 2098 } 2099 2100 if (!pol) { 2101 if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) { 2102 xfrm_secpath_reject(xerr_idx, skb, &fl); 2103 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS); 2104 return 0; 2105 } 2106 return 1; 2107 } 2108 2109 pol->curlft.use_time = get_seconds(); 2110 2111 pols[0] = pol; 2112 npols ++; 2113 #ifdef CONFIG_XFRM_SUB_POLICY 2114 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) { 2115 pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, 2116 &fl, family, 2117 XFRM_POLICY_IN); 2118 if (pols[1]) { 2119 if (IS_ERR(pols[1])) { 2120 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 2121 return 0; 2122 } 2123 pols[1]->curlft.use_time = get_seconds(); 2124 npols ++; 2125 } 2126 } 2127 #endif 2128 2129 if (pol->action == XFRM_POLICY_ALLOW) { 2130 struct sec_path *sp; 2131 static struct sec_path dummy; 2132 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH]; 2133 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH]; 2134 struct xfrm_tmpl **tpp = tp; 2135 int ti = 0; 2136 int i, k; 2137 2138 if ((sp = skb->sp) == NULL) 2139 sp = &dummy; 2140 2141 for (pi = 0; pi < npols; pi++) { 2142 if (pols[pi] != pol && 2143 pols[pi]->action != XFRM_POLICY_ALLOW) { 2144 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK); 2145 goto reject; 2146 } 2147 if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) { 2148 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR); 2149 goto reject_error; 2150 } 2151 for (i = 0; i < pols[pi]->xfrm_nr; i++) 2152 tpp[ti++] = &pols[pi]->xfrm_vec[i]; 2153 } 2154 xfrm_nr = ti; 2155 if (npols > 1) { 2156 xfrm_tmpl_sort(stp, tpp, xfrm_nr, family); 2157 tpp = stp; 2158 } 2159 2160 /* For each tunnel xfrm, find the first matching tmpl. 2161 * For each tmpl before that, find corresponding xfrm. 2162 * Order is _important_. Later we will implement 2163 * some barriers, but at the moment barriers 2164 * are implied between each two transformations. 2165 */ 2166 for (i = xfrm_nr-1, k = 0; i >= 0; i--) { 2167 k = xfrm_policy_ok(tpp[i], sp, k, family); 2168 if (k < 0) { 2169 if (k < -1) 2170 /* "-2 - errored_index" returned */ 2171 xerr_idx = -(2+k); 2172 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH); 2173 goto reject; 2174 } 2175 } 2176 2177 if (secpath_has_nontransport(sp, k, &xerr_idx)) { 2178 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH); 2179 goto reject; 2180 } 2181 2182 xfrm_pols_put(pols, npols); 2183 return 1; 2184 } 2185 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK); 2186 2187 reject: 2188 xfrm_secpath_reject(xerr_idx, skb, &fl); 2189 reject_error: 2190 xfrm_pols_put(pols, npols); 2191 return 0; 2192 } 2193 EXPORT_SYMBOL(__xfrm_policy_check); 2194 2195 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family) 2196 { 2197 struct net *net = dev_net(skb->dev); 2198 struct flowi fl; 2199 struct dst_entry *dst; 2200 int res = 1; 2201 2202 if (xfrm_decode_session(skb, &fl, family) < 0) { 2203 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR); 2204 return 0; 2205 } 2206 2207 skb_dst_force(skb); 2208 2209 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0); 2210 if (IS_ERR(dst)) { 2211 res = 0; 2212 dst = NULL; 2213 } 2214 skb_dst_set(skb, dst); 2215 return res; 2216 } 2217 EXPORT_SYMBOL(__xfrm_route_forward); 2218 2219 /* Optimize later using cookies and generation ids. */ 2220 2221 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie) 2222 { 2223 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete 2224 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to 2225 * get validated by dst_ops->check on every use. We do this 2226 * because when a normal route referenced by an XFRM dst is 2227 * obsoleted we do not go looking around for all parent 2228 * referencing XFRM dsts so that we can invalidate them. It 2229 * is just too much work. Instead we make the checks here on 2230 * every use. For example: 2231 * 2232 * XFRM dst A --> IPv4 dst X 2233 * 2234 * X is the "xdst->route" of A (X is also the "dst->path" of A 2235 * in this example). If X is marked obsolete, "A" will not 2236 * notice. That's what we are validating here via the 2237 * stale_bundle() check. 2238 * 2239 * When a policy's bundle is pruned, we dst_free() the XFRM 2240 * dst which causes it's ->obsolete field to be set to 2241 * DST_OBSOLETE_DEAD. If an XFRM dst has been pruned like 2242 * this, we want to force a new route lookup. 2243 */ 2244 if (dst->obsolete < 0 && !stale_bundle(dst)) 2245 return dst; 2246 2247 return NULL; 2248 } 2249 2250 static int stale_bundle(struct dst_entry *dst) 2251 { 2252 return !xfrm_bundle_ok((struct xfrm_dst *)dst); 2253 } 2254 2255 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev) 2256 { 2257 while ((dst = dst->child) && dst->xfrm && dst->dev == dev) { 2258 dst->dev = dev_net(dev)->loopback_dev; 2259 dev_hold(dst->dev); 2260 dev_put(dev); 2261 } 2262 } 2263 EXPORT_SYMBOL(xfrm_dst_ifdown); 2264 2265 static void xfrm_link_failure(struct sk_buff *skb) 2266 { 2267 /* Impossible. Such dst must be popped before reaches point of failure. */ 2268 } 2269 2270 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst) 2271 { 2272 if (dst) { 2273 if (dst->obsolete) { 2274 dst_release(dst); 2275 dst = NULL; 2276 } 2277 } 2278 return dst; 2279 } 2280 2281 static void __xfrm_garbage_collect(struct net *net) 2282 { 2283 struct dst_entry *head, *next; 2284 2285 spin_lock_bh(&xfrm_policy_sk_bundle_lock); 2286 head = xfrm_policy_sk_bundles; 2287 xfrm_policy_sk_bundles = NULL; 2288 spin_unlock_bh(&xfrm_policy_sk_bundle_lock); 2289 2290 while (head) { 2291 next = head->next; 2292 dst_free(head); 2293 head = next; 2294 } 2295 } 2296 2297 static void xfrm_garbage_collect(struct net *net) 2298 { 2299 flow_cache_flush(); 2300 __xfrm_garbage_collect(net); 2301 } 2302 2303 static void xfrm_garbage_collect_deferred(struct net *net) 2304 { 2305 flow_cache_flush_deferred(); 2306 __xfrm_garbage_collect(net); 2307 } 2308 2309 static void xfrm_init_pmtu(struct dst_entry *dst) 2310 { 2311 do { 2312 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 2313 u32 pmtu, route_mtu_cached; 2314 2315 pmtu = dst_mtu(dst->child); 2316 xdst->child_mtu_cached = pmtu; 2317 2318 pmtu = xfrm_state_mtu(dst->xfrm, pmtu); 2319 2320 route_mtu_cached = dst_mtu(xdst->route); 2321 xdst->route_mtu_cached = route_mtu_cached; 2322 2323 if (pmtu > route_mtu_cached) 2324 pmtu = route_mtu_cached; 2325 2326 dst_metric_set(dst, RTAX_MTU, pmtu); 2327 } while ((dst = dst->next)); 2328 } 2329 2330 /* Check that the bundle accepts the flow and its components are 2331 * still valid. 2332 */ 2333 2334 static int xfrm_bundle_ok(struct xfrm_dst *first) 2335 { 2336 struct dst_entry *dst = &first->u.dst; 2337 struct xfrm_dst *last; 2338 u32 mtu; 2339 2340 if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) || 2341 (dst->dev && !netif_running(dst->dev))) 2342 return 0; 2343 2344 last = NULL; 2345 2346 do { 2347 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 2348 2349 if (dst->xfrm->km.state != XFRM_STATE_VALID) 2350 return 0; 2351 if (xdst->xfrm_genid != dst->xfrm->genid) 2352 return 0; 2353 if (xdst->num_pols > 0 && 2354 xdst->policy_genid != atomic_read(&xdst->pols[0]->genid)) 2355 return 0; 2356 2357 mtu = dst_mtu(dst->child); 2358 if (xdst->child_mtu_cached != mtu) { 2359 last = xdst; 2360 xdst->child_mtu_cached = mtu; 2361 } 2362 2363 if (!dst_check(xdst->route, xdst->route_cookie)) 2364 return 0; 2365 mtu = dst_mtu(xdst->route); 2366 if (xdst->route_mtu_cached != mtu) { 2367 last = xdst; 2368 xdst->route_mtu_cached = mtu; 2369 } 2370 2371 dst = dst->child; 2372 } while (dst->xfrm); 2373 2374 if (likely(!last)) 2375 return 1; 2376 2377 mtu = last->child_mtu_cached; 2378 for (;;) { 2379 dst = &last->u.dst; 2380 2381 mtu = xfrm_state_mtu(dst->xfrm, mtu); 2382 if (mtu > last->route_mtu_cached) 2383 mtu = last->route_mtu_cached; 2384 dst_metric_set(dst, RTAX_MTU, mtu); 2385 2386 if (last == first) 2387 break; 2388 2389 last = (struct xfrm_dst *)last->u.dst.next; 2390 last->child_mtu_cached = mtu; 2391 } 2392 2393 return 1; 2394 } 2395 2396 static unsigned int xfrm_default_advmss(const struct dst_entry *dst) 2397 { 2398 return dst_metric_advmss(dst->path); 2399 } 2400 2401 static unsigned int xfrm_mtu(const struct dst_entry *dst) 2402 { 2403 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); 2404 2405 return mtu ? : dst_mtu(dst->path); 2406 } 2407 2408 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, 2409 struct sk_buff *skb, 2410 const void *daddr) 2411 { 2412 return dst->path->ops->neigh_lookup(dst, skb, daddr); 2413 } 2414 2415 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) 2416 { 2417 struct net *net; 2418 int err = 0; 2419 if (unlikely(afinfo == NULL)) 2420 return -EINVAL; 2421 if (unlikely(afinfo->family >= NPROTO)) 2422 return -EAFNOSUPPORT; 2423 write_lock_bh(&xfrm_policy_afinfo_lock); 2424 if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL)) 2425 err = -ENOBUFS; 2426 else { 2427 struct dst_ops *dst_ops = afinfo->dst_ops; 2428 if (likely(dst_ops->kmem_cachep == NULL)) 2429 dst_ops->kmem_cachep = xfrm_dst_cache; 2430 if (likely(dst_ops->check == NULL)) 2431 dst_ops->check = xfrm_dst_check; 2432 if (likely(dst_ops->default_advmss == NULL)) 2433 dst_ops->default_advmss = xfrm_default_advmss; 2434 if (likely(dst_ops->mtu == NULL)) 2435 dst_ops->mtu = xfrm_mtu; 2436 if (likely(dst_ops->negative_advice == NULL)) 2437 dst_ops->negative_advice = xfrm_negative_advice; 2438 if (likely(dst_ops->link_failure == NULL)) 2439 dst_ops->link_failure = xfrm_link_failure; 2440 if (likely(dst_ops->neigh_lookup == NULL)) 2441 dst_ops->neigh_lookup = xfrm_neigh_lookup; 2442 if (likely(afinfo->garbage_collect == NULL)) 2443 afinfo->garbage_collect = xfrm_garbage_collect_deferred; 2444 xfrm_policy_afinfo[afinfo->family] = afinfo; 2445 } 2446 write_unlock_bh(&xfrm_policy_afinfo_lock); 2447 2448 rtnl_lock(); 2449 for_each_net(net) { 2450 struct dst_ops *xfrm_dst_ops; 2451 2452 switch (afinfo->family) { 2453 case AF_INET: 2454 xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops; 2455 break; 2456 #if IS_ENABLED(CONFIG_IPV6) 2457 case AF_INET6: 2458 xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops; 2459 break; 2460 #endif 2461 default: 2462 BUG(); 2463 } 2464 *xfrm_dst_ops = *afinfo->dst_ops; 2465 } 2466 rtnl_unlock(); 2467 2468 return err; 2469 } 2470 EXPORT_SYMBOL(xfrm_policy_register_afinfo); 2471 2472 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo) 2473 { 2474 int err = 0; 2475 if (unlikely(afinfo == NULL)) 2476 return -EINVAL; 2477 if (unlikely(afinfo->family >= NPROTO)) 2478 return -EAFNOSUPPORT; 2479 write_lock_bh(&xfrm_policy_afinfo_lock); 2480 if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) { 2481 if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo)) 2482 err = -EINVAL; 2483 else { 2484 struct dst_ops *dst_ops = afinfo->dst_ops; 2485 xfrm_policy_afinfo[afinfo->family] = NULL; 2486 dst_ops->kmem_cachep = NULL; 2487 dst_ops->check = NULL; 2488 dst_ops->negative_advice = NULL; 2489 dst_ops->link_failure = NULL; 2490 afinfo->garbage_collect = NULL; 2491 } 2492 } 2493 write_unlock_bh(&xfrm_policy_afinfo_lock); 2494 return err; 2495 } 2496 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); 2497 2498 static void __net_init xfrm_dst_ops_init(struct net *net) 2499 { 2500 struct xfrm_policy_afinfo *afinfo; 2501 2502 read_lock_bh(&xfrm_policy_afinfo_lock); 2503 afinfo = xfrm_policy_afinfo[AF_INET]; 2504 if (afinfo) 2505 net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops; 2506 #if IS_ENABLED(CONFIG_IPV6) 2507 afinfo = xfrm_policy_afinfo[AF_INET6]; 2508 if (afinfo) 2509 net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops; 2510 #endif 2511 read_unlock_bh(&xfrm_policy_afinfo_lock); 2512 } 2513 2514 static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family) 2515 { 2516 struct xfrm_policy_afinfo *afinfo; 2517 if (unlikely(family >= NPROTO)) 2518 return NULL; 2519 read_lock(&xfrm_policy_afinfo_lock); 2520 afinfo = xfrm_policy_afinfo[family]; 2521 if (unlikely(!afinfo)) 2522 read_unlock(&xfrm_policy_afinfo_lock); 2523 return afinfo; 2524 } 2525 2526 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo) 2527 { 2528 read_unlock(&xfrm_policy_afinfo_lock); 2529 } 2530 2531 static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr) 2532 { 2533 struct net_device *dev = ptr; 2534 2535 switch (event) { 2536 case NETDEV_DOWN: 2537 xfrm_garbage_collect(dev_net(dev)); 2538 } 2539 return NOTIFY_DONE; 2540 } 2541 2542 static struct notifier_block xfrm_dev_notifier = { 2543 .notifier_call = xfrm_dev_event, 2544 }; 2545 2546 #ifdef CONFIG_XFRM_STATISTICS 2547 static int __net_init xfrm_statistics_init(struct net *net) 2548 { 2549 int rv; 2550 2551 if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics, 2552 sizeof(struct linux_xfrm_mib), 2553 __alignof__(struct linux_xfrm_mib)) < 0) 2554 return -ENOMEM; 2555 rv = xfrm_proc_init(net); 2556 if (rv < 0) 2557 snmp_mib_free((void __percpu **)net->mib.xfrm_statistics); 2558 return rv; 2559 } 2560 2561 static void xfrm_statistics_fini(struct net *net) 2562 { 2563 xfrm_proc_fini(net); 2564 snmp_mib_free((void __percpu **)net->mib.xfrm_statistics); 2565 } 2566 #else 2567 static int __net_init xfrm_statistics_init(struct net *net) 2568 { 2569 return 0; 2570 } 2571 2572 static void xfrm_statistics_fini(struct net *net) 2573 { 2574 } 2575 #endif 2576 2577 static int __net_init xfrm_policy_init(struct net *net) 2578 { 2579 unsigned int hmask, sz; 2580 int dir; 2581 2582 if (net_eq(net, &init_net)) 2583 xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache", 2584 sizeof(struct xfrm_dst), 2585 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 2586 NULL); 2587 2588 hmask = 8 - 1; 2589 sz = (hmask+1) * sizeof(struct hlist_head); 2590 2591 net->xfrm.policy_byidx = xfrm_hash_alloc(sz); 2592 if (!net->xfrm.policy_byidx) 2593 goto out_byidx; 2594 net->xfrm.policy_idx_hmask = hmask; 2595 2596 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) { 2597 struct xfrm_policy_hash *htab; 2598 2599 net->xfrm.policy_count[dir] = 0; 2600 INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]); 2601 2602 htab = &net->xfrm.policy_bydst[dir]; 2603 htab->table = xfrm_hash_alloc(sz); 2604 if (!htab->table) 2605 goto out_bydst; 2606 htab->hmask = hmask; 2607 } 2608 2609 INIT_LIST_HEAD(&net->xfrm.policy_all); 2610 INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize); 2611 if (net_eq(net, &init_net)) 2612 register_netdevice_notifier(&xfrm_dev_notifier); 2613 return 0; 2614 2615 out_bydst: 2616 for (dir--; dir >= 0; dir--) { 2617 struct xfrm_policy_hash *htab; 2618 2619 htab = &net->xfrm.policy_bydst[dir]; 2620 xfrm_hash_free(htab->table, sz); 2621 } 2622 xfrm_hash_free(net->xfrm.policy_byidx, sz); 2623 out_byidx: 2624 return -ENOMEM; 2625 } 2626 2627 static void xfrm_policy_fini(struct net *net) 2628 { 2629 struct xfrm_audit audit_info; 2630 unsigned int sz; 2631 int dir; 2632 2633 flush_work(&net->xfrm.policy_hash_work); 2634 #ifdef CONFIG_XFRM_SUB_POLICY 2635 audit_info.loginuid = -1; 2636 audit_info.sessionid = -1; 2637 audit_info.secid = 0; 2638 xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info); 2639 #endif 2640 audit_info.loginuid = -1; 2641 audit_info.sessionid = -1; 2642 audit_info.secid = 0; 2643 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info); 2644 2645 WARN_ON(!list_empty(&net->xfrm.policy_all)); 2646 2647 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) { 2648 struct xfrm_policy_hash *htab; 2649 2650 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir])); 2651 2652 htab = &net->xfrm.policy_bydst[dir]; 2653 sz = (htab->hmask + 1); 2654 WARN_ON(!hlist_empty(htab->table)); 2655 xfrm_hash_free(htab->table, sz); 2656 } 2657 2658 sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head); 2659 WARN_ON(!hlist_empty(net->xfrm.policy_byidx)); 2660 xfrm_hash_free(net->xfrm.policy_byidx, sz); 2661 } 2662 2663 static int __net_init xfrm_net_init(struct net *net) 2664 { 2665 int rv; 2666 2667 rv = xfrm_statistics_init(net); 2668 if (rv < 0) 2669 goto out_statistics; 2670 rv = xfrm_state_init(net); 2671 if (rv < 0) 2672 goto out_state; 2673 rv = xfrm_policy_init(net); 2674 if (rv < 0) 2675 goto out_policy; 2676 xfrm_dst_ops_init(net); 2677 rv = xfrm_sysctl_init(net); 2678 if (rv < 0) 2679 goto out_sysctl; 2680 return 0; 2681 2682 out_sysctl: 2683 xfrm_policy_fini(net); 2684 out_policy: 2685 xfrm_state_fini(net); 2686 out_state: 2687 xfrm_statistics_fini(net); 2688 out_statistics: 2689 return rv; 2690 } 2691 2692 static void __net_exit xfrm_net_exit(struct net *net) 2693 { 2694 xfrm_sysctl_fini(net); 2695 xfrm_policy_fini(net); 2696 xfrm_state_fini(net); 2697 xfrm_statistics_fini(net); 2698 } 2699 2700 static struct pernet_operations __net_initdata xfrm_net_ops = { 2701 .init = xfrm_net_init, 2702 .exit = xfrm_net_exit, 2703 }; 2704 2705 void __init xfrm_init(void) 2706 { 2707 register_pernet_subsys(&xfrm_net_ops); 2708 xfrm_input_init(); 2709 } 2710 2711 #ifdef CONFIG_AUDITSYSCALL 2712 static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp, 2713 struct audit_buffer *audit_buf) 2714 { 2715 struct xfrm_sec_ctx *ctx = xp->security; 2716 struct xfrm_selector *sel = &xp->selector; 2717 2718 if (ctx) 2719 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s", 2720 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str); 2721 2722 switch(sel->family) { 2723 case AF_INET: 2724 audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4); 2725 if (sel->prefixlen_s != 32) 2726 audit_log_format(audit_buf, " src_prefixlen=%d", 2727 sel->prefixlen_s); 2728 audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4); 2729 if (sel->prefixlen_d != 32) 2730 audit_log_format(audit_buf, " dst_prefixlen=%d", 2731 sel->prefixlen_d); 2732 break; 2733 case AF_INET6: 2734 audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6); 2735 if (sel->prefixlen_s != 128) 2736 audit_log_format(audit_buf, " src_prefixlen=%d", 2737 sel->prefixlen_s); 2738 audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6); 2739 if (sel->prefixlen_d != 128) 2740 audit_log_format(audit_buf, " dst_prefixlen=%d", 2741 sel->prefixlen_d); 2742 break; 2743 } 2744 } 2745 2746 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, 2747 uid_t auid, u32 sessionid, u32 secid) 2748 { 2749 struct audit_buffer *audit_buf; 2750 2751 audit_buf = xfrm_audit_start("SPD-add"); 2752 if (audit_buf == NULL) 2753 return; 2754 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf); 2755 audit_log_format(audit_buf, " res=%u", result); 2756 xfrm_audit_common_policyinfo(xp, audit_buf); 2757 audit_log_end(audit_buf); 2758 } 2759 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add); 2760 2761 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, 2762 uid_t auid, u32 sessionid, u32 secid) 2763 { 2764 struct audit_buffer *audit_buf; 2765 2766 audit_buf = xfrm_audit_start("SPD-delete"); 2767 if (audit_buf == NULL) 2768 return; 2769 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf); 2770 audit_log_format(audit_buf, " res=%u", result); 2771 xfrm_audit_common_policyinfo(xp, audit_buf); 2772 audit_log_end(audit_buf); 2773 } 2774 EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete); 2775 #endif 2776 2777 #ifdef CONFIG_XFRM_MIGRATE 2778 static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp, 2779 const struct xfrm_selector *sel_tgt) 2780 { 2781 if (sel_cmp->proto == IPSEC_ULPROTO_ANY) { 2782 if (sel_tgt->family == sel_cmp->family && 2783 xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr, 2784 sel_cmp->family) == 0 && 2785 xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr, 2786 sel_cmp->family) == 0 && 2787 sel_tgt->prefixlen_d == sel_cmp->prefixlen_d && 2788 sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) { 2789 return true; 2790 } 2791 } else { 2792 if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) { 2793 return true; 2794 } 2795 } 2796 return false; 2797 } 2798 2799 static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector *sel, 2800 u8 dir, u8 type) 2801 { 2802 struct xfrm_policy *pol, *ret = NULL; 2803 struct hlist_node *entry; 2804 struct hlist_head *chain; 2805 u32 priority = ~0U; 2806 2807 read_lock_bh(&xfrm_policy_lock); 2808 chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir); 2809 hlist_for_each_entry(pol, entry, chain, bydst) { 2810 if (xfrm_migrate_selector_match(sel, &pol->selector) && 2811 pol->type == type) { 2812 ret = pol; 2813 priority = ret->priority; 2814 break; 2815 } 2816 } 2817 chain = &init_net.xfrm.policy_inexact[dir]; 2818 hlist_for_each_entry(pol, entry, chain, bydst) { 2819 if (xfrm_migrate_selector_match(sel, &pol->selector) && 2820 pol->type == type && 2821 pol->priority < priority) { 2822 ret = pol; 2823 break; 2824 } 2825 } 2826 2827 if (ret) 2828 xfrm_pol_hold(ret); 2829 2830 read_unlock_bh(&xfrm_policy_lock); 2831 2832 return ret; 2833 } 2834 2835 static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t) 2836 { 2837 int match = 0; 2838 2839 if (t->mode == m->mode && t->id.proto == m->proto && 2840 (m->reqid == 0 || t->reqid == m->reqid)) { 2841 switch (t->mode) { 2842 case XFRM_MODE_TUNNEL: 2843 case XFRM_MODE_BEET: 2844 if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr, 2845 m->old_family) == 0 && 2846 xfrm_addr_cmp(&t->saddr, &m->old_saddr, 2847 m->old_family) == 0) { 2848 match = 1; 2849 } 2850 break; 2851 case XFRM_MODE_TRANSPORT: 2852 /* in case of transport mode, template does not store 2853 any IP addresses, hence we just compare mode and 2854 protocol */ 2855 match = 1; 2856 break; 2857 default: 2858 break; 2859 } 2860 } 2861 return match; 2862 } 2863 2864 /* update endpoint address(es) of template(s) */ 2865 static int xfrm_policy_migrate(struct xfrm_policy *pol, 2866 struct xfrm_migrate *m, int num_migrate) 2867 { 2868 struct xfrm_migrate *mp; 2869 int i, j, n = 0; 2870 2871 write_lock_bh(&pol->lock); 2872 if (unlikely(pol->walk.dead)) { 2873 /* target policy has been deleted */ 2874 write_unlock_bh(&pol->lock); 2875 return -ENOENT; 2876 } 2877 2878 for (i = 0; i < pol->xfrm_nr; i++) { 2879 for (j = 0, mp = m; j < num_migrate; j++, mp++) { 2880 if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i])) 2881 continue; 2882 n++; 2883 if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL && 2884 pol->xfrm_vec[i].mode != XFRM_MODE_BEET) 2885 continue; 2886 /* update endpoints */ 2887 memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr, 2888 sizeof(pol->xfrm_vec[i].id.daddr)); 2889 memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr, 2890 sizeof(pol->xfrm_vec[i].saddr)); 2891 pol->xfrm_vec[i].encap_family = mp->new_family; 2892 /* flush bundles */ 2893 atomic_inc(&pol->genid); 2894 } 2895 } 2896 2897 write_unlock_bh(&pol->lock); 2898 2899 if (!n) 2900 return -ENODATA; 2901 2902 return 0; 2903 } 2904 2905 static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate) 2906 { 2907 int i, j; 2908 2909 if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH) 2910 return -EINVAL; 2911 2912 for (i = 0; i < num_migrate; i++) { 2913 if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr, 2914 m[i].old_family) == 0) && 2915 (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr, 2916 m[i].old_family) == 0)) 2917 return -EINVAL; 2918 if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) || 2919 xfrm_addr_any(&m[i].new_saddr, m[i].new_family)) 2920 return -EINVAL; 2921 2922 /* check if there is any duplicated entry */ 2923 for (j = i + 1; j < num_migrate; j++) { 2924 if (!memcmp(&m[i].old_daddr, &m[j].old_daddr, 2925 sizeof(m[i].old_daddr)) && 2926 !memcmp(&m[i].old_saddr, &m[j].old_saddr, 2927 sizeof(m[i].old_saddr)) && 2928 m[i].proto == m[j].proto && 2929 m[i].mode == m[j].mode && 2930 m[i].reqid == m[j].reqid && 2931 m[i].old_family == m[j].old_family) 2932 return -EINVAL; 2933 } 2934 } 2935 2936 return 0; 2937 } 2938 2939 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, 2940 struct xfrm_migrate *m, int num_migrate, 2941 struct xfrm_kmaddress *k) 2942 { 2943 int i, err, nx_cur = 0, nx_new = 0; 2944 struct xfrm_policy *pol = NULL; 2945 struct xfrm_state *x, *xc; 2946 struct xfrm_state *x_cur[XFRM_MAX_DEPTH]; 2947 struct xfrm_state *x_new[XFRM_MAX_DEPTH]; 2948 struct xfrm_migrate *mp; 2949 2950 if ((err = xfrm_migrate_check(m, num_migrate)) < 0) 2951 goto out; 2952 2953 /* Stage 1 - find policy */ 2954 if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) { 2955 err = -ENOENT; 2956 goto out; 2957 } 2958 2959 /* Stage 2 - find and update state(s) */ 2960 for (i = 0, mp = m; i < num_migrate; i++, mp++) { 2961 if ((x = xfrm_migrate_state_find(mp))) { 2962 x_cur[nx_cur] = x; 2963 nx_cur++; 2964 if ((xc = xfrm_state_migrate(x, mp))) { 2965 x_new[nx_new] = xc; 2966 nx_new++; 2967 } else { 2968 err = -ENODATA; 2969 goto restore_state; 2970 } 2971 } 2972 } 2973 2974 /* Stage 3 - update policy */ 2975 if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0) 2976 goto restore_state; 2977 2978 /* Stage 4 - delete old state(s) */ 2979 if (nx_cur) { 2980 xfrm_states_put(x_cur, nx_cur); 2981 xfrm_states_delete(x_cur, nx_cur); 2982 } 2983 2984 /* Stage 5 - announce */ 2985 km_migrate(sel, dir, type, m, num_migrate, k); 2986 2987 xfrm_pol_put(pol); 2988 2989 return 0; 2990 out: 2991 return err; 2992 2993 restore_state: 2994 if (pol) 2995 xfrm_pol_put(pol); 2996 if (nx_cur) 2997 xfrm_states_put(x_cur, nx_cur); 2998 if (nx_new) 2999 xfrm_states_delete(x_new, nx_new); 3000 3001 return err; 3002 } 3003 EXPORT_SYMBOL(xfrm_migrate); 3004 #endif 3005