/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/xfrm.h>
#include <net/ip.h>
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif

#include "xfrm_hash.h"

DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_mutex);

static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock);
static struct dst_entry *xfrm_policy_sk_bundles;
static DEFINE_RWLOCK(xfrm_policy_lock);

static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];

static struct kmem_cache *xfrm_dst_cache __read_mostly;

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
static void xfrm_init_pmtu(struct dst_entry *dst);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);

static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}

static inline bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}

bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
			 unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return false;
}

static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
						  const xfrm_address_t *saddr,
						  const xfrm_address_t *daddr,
						  int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, saddr, daddr);

	xfrm_policy_put_afinfo(afinfo);

	return dst;
}
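/*
 * A note on the convention used throughout this file: all address-family
 * specific work is dispatched through struct xfrm_policy_afinfo, and every
 * xfrm_policy_get_afinfo() is paired with xfrm_policy_put_afinfo() (as in
 * __xfrm_dst_lookup() above), the get side pinning the afinfo entry while
 * its callbacks run.
 */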
static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family)
{
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		saddr = x->coaddr;
		daddr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		saddr = prev_saddr;
		daddr = x->coaddr;
	}

	dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
	}

	return dst;
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}

static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy *)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (unlikely(xp->walk.dead))
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}

static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
{
	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);

	if (unlikely(pol->walk.dead))
		flo = NULL;
	else
		xfrm_pol_hold(pol);

	return flo;
}

static int xfrm_policy_flo_check(struct flow_cache_object *flo)
{
	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);

	return !pol->walk.dead;
}

static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
{
	xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));
}

static const struct flow_cache_ops xfrm_policy_fc_ops = {
	.get = xfrm_policy_flo_get,
	.check = xfrm_policy_flo_check,
	.delete = xfrm_policy_flo_delete,
};
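/*
 * The three callbacks above implement the flow cache contract for cached
 * policies: ->get returns the object with a fresh reference (or NULL once
 * the policy has died, forcing re-resolution), ->check reports whether a
 * cached entry is still usable, and ->delete drops the cache's reference.
 * The bundle cache further down (xfrm_bundle_fc_ops) follows the same
 * contract for struct xfrm_dst.
 */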
/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		atomic_set(&policy->refcnt, 1);
		setup_timer(&policy->timer, xfrm_policy_timer,
			    (unsigned long)policy);
		policy->flo.ops = &xfrm_policy_fc_ops;
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

/* Destroy xfrm_policy: descendant resources must be released to this moment. */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);

	if (del_timer(&policy->timer))
		BUG();

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}
EXPORT_SYMBOL(xfrm_policy_destroy);

/* Rule must be locked. Release descendant resources, announce
 * the entry dead. The rule must already be unlinked from the lists.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	policy->walk.dead = 1;

	atomic_inc(&policy->genid);

	if (del_timer(&policy->timer))
		xfrm_pol_put(policy);

	xfrm_pol_put(policy);
}

static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}
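/*
 * SPD layout: per direction a policy lives in one of two places. When the
 * selector is specific enough to hash on, the policy sits in a
 * policy_bydst hash chain; otherwise it goes on the unhashed
 * policy_inexact list. __sel_hash() signals the latter case by returning
 * hmask + 1, which policy_hash_bysel() below maps to the inexact list
 * head.
 */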
static struct hlist_head *policy_hash_bysel(struct net *net,
					    const struct xfrm_selector *sel,
					    unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash = __sel_hash(sel, family, hmask);

	return (hash == hmask + 1 ?
		&net->xfrm.policy_inexact[dir] :
		net->xfrm.policy_bydst[dir].table + hash);
}

static struct hlist_head *policy_hash_direct(struct net *net,
					     const xfrm_address_t *daddr,
					     const xfrm_address_t *saddr,
					     unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash = __addr_hash(daddr, saddr, family, hmask);

	return net->xfrm.policy_bydst[dir].table + hash;
}

static void xfrm_dst_hash_transfer(struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;

redo:
	hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) {
		unsigned int h;

		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask);
		if (!entry0) {
			hlist_del(entry);
			hlist_add_head(&pol->bydst, ndsttable+h);
			h0 = h;
		} else {
			if (h != h0)
				continue;
			hlist_del(entry);
			hlist_add_after(entry0, &pol->bydst);
		}
		entry0 = entry;
	}
	if (!hlist_empty(list)) {
		entry0 = NULL;
		goto redo;
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *entry, *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}

static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	int i;

	if (!ndst)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(odst + i, ndst, nhashmask);

	net->xfrm.policy_bydst[dir].table = ndst;
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	write_lock_bh(&xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	write_unlock_bh(&xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}
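/*
 * Growth policy for both tables, implemented below: double the bucket
 * count (hmask 15 -> 31 -> 63 ...) once the number of policies exceeds
 * the number of buckets, capped by xfrm_policy_hashmax. The rehash runs
 * from the policy_hash_work workqueue item, so hot paths only ever
 * schedule it.
 */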
static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
	read_lock_bh(&xfrm_policy_lock);
	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = net->xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
	read_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_spd_getinfo);

static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	}
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net, total);

	mutex_unlock(&hash_resize_mutex);
}

/* Generate new index... KAME seems to generate them ordered by cost
 * at the price of complete unpredictability of the ordering of rules.
 * This will not pass. */
static u32 xfrm_gen_index(struct net *net, int dir)
{
	static u32 idx_generator;

	for (;;) {
		struct hlist_node *entry;
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		idx = (idx_generator | dir);
		idx_generator += 8;
		if (idx == 0)
			idx = 8;
		list = net->xfrm.policy_byidx + idx_hash(net, idx);
		found = 0;
		hlist_for_each_entry(p, entry, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}

static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}
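/*
 * A minimal caller sketch for xfrm_policy_insert() (illustrative only;
 * most selector fields and the error path are omitted -- see
 * xfrm_add_policy() in xfrm_user.c for a complete caller). The final
 * argument requests exclusive insert, so an equivalent existing policy
 * yields -EEXIST instead of being replaced:
 *
 *	struct xfrm_policy *pol = xfrm_policy_alloc(net, GFP_KERNEL);
 *
 *	if (!pol)
 *		return -ENOMEM;
 *	pol->family = AF_INET;
 *	pol->action = XFRM_POLICY_ALLOW;
 *	pol->lft.hard_add_expires_seconds = 3600;
 *	err = xfrm_policy_insert(XFRM_POLICY_OUT, pol, 1);
 */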
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct net *net = xp_net(policy);
	struct xfrm_policy *pol;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct hlist_node *entry, *newpos;
	u32 mark = policy->mark.v & policy->mark.m;

	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
	delpol = NULL;
	newpos = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == policy->type &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    (mark & pol->mark.m) == pol->mark.v &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl) {
				write_unlock_bh(&xfrm_policy_lock);
				return -EEXIST;
			}
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst;
			continue;
		}
		if (delpol)
			break;
	}
	if (newpos)
		hlist_add_after(newpos, &policy->bydst);
	else
		hlist_add_head(&policy->bydst, chain);
	xfrm_pol_hold(policy);
	net->xfrm.policy_count[dir]++;
	atomic_inc(&flow_cache_genid);
	if (delpol)
		__xfrm_policy_unlink(delpol, dir);
	policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
	policy->curlft.add_time = get_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	list_add(&policy->walk.all, &net->xfrm.policy_all);
	write_unlock_bh(&xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
					  int dir, struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete,
					  int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_bysel(net, sel, sel->family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (pol->type == type &&
		    (mark & pol->mark.m) == pol->mark.v &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security)) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
				     int dir, u32 id, int delete, int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;
	struct hlist_node *entry;

	*err = -ENOENT;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	write_lock_bh(&xfrm_policy_lock);
	chain = net->xfrm.policy_byidx + idx_hash(net, id);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, byidx) {
		if (pol->type == type && pol->index == id &&
		    (mark & pol->mark.m) == pol->mark.v) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					write_unlock_bh(&xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);
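/*
 * Reference semantics of the two lookups above: on success the policy is
 * returned held (xfrm_pol_hold). With delete != 0 the entry is also
 * unlinked and killed, so the reference handed back is the caller's to
 * drop with xfrm_pol_put() once it has finished reporting the deletion.
 */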
#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i;

		hlist_for_each_entry(pol, entry,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			err = security_xfrm_policy_delete(pol->security);
			if (err) {
				xfrm_audit_policy_delete(pol, 0,
							 audit_info->loginuid,
							 audit_info->sessionid,
							 audit_info->secid);
				return err;
			}
		}
		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol, entry,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				err = security_xfrm_policy_delete(
								pol->security);
				if (err) {
					xfrm_audit_policy_delete(pol, 0,
							audit_info->loginuid,
							audit_info->sessionid,
							audit_info->secid);
					return err;
				}
			}
		}
	}
	return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	return 0;
}
#endif

int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
{
	int dir, err = 0, cnt = 0;

	write_lock_bh(&xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(net, type, audit_info);
	if (err)
		goto out;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		struct hlist_node *entry;
		int i;

	again1:
		hlist_for_each_entry(pol, entry,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			__xfrm_policy_unlink(pol, dir);
			write_unlock_bh(&xfrm_policy_lock);
			cnt++;

			xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
						 audit_info->sessionid,
						 audit_info->secid);

			xfrm_policy_kill(pol);

			write_lock_bh(&xfrm_policy_lock);
			goto again1;
		}

		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
	again2:
			hlist_for_each_entry(pol, entry,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				__xfrm_policy_unlink(pol, dir);
				write_unlock_bh(&xfrm_policy_lock);
				cnt++;

				xfrm_audit_policy_delete(pol, 1,
							 audit_info->loginuid,
							 audit_info->sessionid,
							 audit_info->secid);
				xfrm_policy_kill(pol);

				write_lock_bh(&xfrm_policy_lock);
				goto again2;
			}
		}

	}
	if (!cnt)
		err = -ESRCH;
out:
	write_unlock_bh(&xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);

int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
		     int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol;
	struct xfrm_policy_walk_entry *x;
	int error = 0;

	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
	    walk->type != XFRM_POLICY_TYPE_ANY)
		return -EINVAL;

	if (list_empty(&walk->walk.all) && walk->seq != 0)
		return 0;

	write_lock_bh(&xfrm_policy_lock);
	if (list_empty(&walk->walk.all))
		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
	else
		x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all);
	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
		if (x->dead)
			continue;
		pol = container_of(x, struct xfrm_policy, walk);
		if (walk->type != XFRM_POLICY_TYPE_ANY &&
		    walk->type != pol->type)
			continue;
		error = func(pol, xfrm_policy_id2dir(pol->index),
			     walk->seq, data);
		if (error) {
			list_move_tail(&walk->walk.all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		error = -ENOENT;
		goto out;
	}
	list_del_init(&walk->walk.all);
out:
	write_unlock_bh(&xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);
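/*
 * Sketch of the intended walker usage (illustrative; the callback name is
 * hypothetical). The walk is resumable: when func returns nonzero, the
 * walk entry is parked in the list and a later call continues from that
 * position.
 *
 *	static int dump_one(struct xfrm_policy *pol, int dir, int seq,
 *			    void *data)
 *	{
 *		...
 *		return 0;
 *	}
 *
 *	struct xfrm_policy_walk walk;
 *
 *	xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_ANY);
 *	err = xfrm_policy_walk(net, &walk, dump_one, NULL);
 *	xfrm_policy_walk_done(&walk);
 */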
void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
{
	INIT_LIST_HEAD(&walk->walk.all);
	walk->walk.dead = 1;
	walk->type = type;
	walk->seq = 0;
}
EXPORT_SYMBOL(xfrm_policy_walk_init);

void xfrm_policy_walk_done(struct xfrm_policy_walk *walk)
{
	if (list_empty(&walk->walk.all))
		return;

	write_lock_bh(&xfrm_policy_lock);
	list_del(&walk->walk.all);
	write_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);

/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
static int xfrm_policy_match(const struct xfrm_policy *pol,
			     const struct flowi *fl,
			     u8 type, u16 family, int dir)
{
	const struct xfrm_selector *sel = &pol->selector;
	int ret = -ESRCH;
	bool match;

	if (pol->family != family ||
	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
						  dir);

	return ret;
}

static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
						     const struct flowi *fl,
						     u16 family, u8 dir)
{
	int err;
	struct xfrm_policy *pol, *ret;
	const xfrm_address_t *daddr, *saddr;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))
		return NULL;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(net, daddr, saddr, family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else if (pol->priority < priority) {
			ret = pol;
			break;
		}
	}
	if (ret)
		xfrm_pol_hold(ret);
fail:
	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}

static struct xfrm_policy *
__xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_policy *pol;

	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
	if (pol != NULL)
		return pol;
#endif
	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
}

static struct flow_cache_object *
xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
		   u8 dir, struct flow_cache_object *old_obj, void *ctx)
{
	struct xfrm_policy *pol;

	if (old_obj)
		xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));

	pol = __xfrm_policy_lookup(net, fl, family, dir);
	if (IS_ERR_OR_NULL(pol))
		return ERR_CAST(pol);

	/* Resolver returns two references:
	 * one for cache and one for caller of flow_cache_lookup() */
	xfrm_pol_hold(pol);

	return &pol->flo;
}
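/*
 * policy_to_flow_dir() below relies on XFRM_POLICY_{IN,OUT,FWD} and
 * FLOW_DIR_{IN,OUT,FWD} sharing values; when they do (the common case)
 * the first test folds to "return dir" at compile time and the switch is
 * dead code.
 */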
static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;
	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	}
}

static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
						 const struct flowi *fl)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		bool match = xfrm_selector_match(&pol->selector, fl,
						 sk->sk_family);
		int err = 0;

		if (match) {
			if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
				pol = NULL;
				goto out;
			}
			err = security_xfrm_policy_lookup(pol->security,
						      fl->flowi_secid,
						      policy_to_flow_dir(dir));
			if (!err)
				xfrm_pol_hold(pol);
			else if (err == -ESRCH)
				pol = NULL;
			else
				pol = ERR_PTR(err);
		} else
			pol = NULL;
	}
out:
	read_unlock_bh(&xfrm_policy_lock);
	return pol;
}

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	struct net *net = xp_net(pol);
	struct hlist_head *chain = policy_hash_bysel(net, &pol->selector,
						     pol->family, dir);

	list_add(&pol->walk.all, &net->xfrm.policy_all);
	hlist_add_head(&pol->bydst, chain);
	hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index));
	net->xfrm.policy_count[dir]++;
	xfrm_pol_hold(pol);

	if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	struct net *net = xp_net(pol);

	if (hlist_unhashed(&pol->bydst))
		return NULL;

	hlist_del(&pol->bydst);
	hlist_del(&pol->byidx);
	list_del(&pol->walk.all);
	net->xfrm.policy_count[dir]--;

	return pol;
}

int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	write_lock_bh(&xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);
	if (pol) {
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);
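/*
 * Per-socket policies are linked with direction XFRM_POLICY_MAX + dir,
 * which keeps them out of the main per-direction lookup tables while
 * still giving them an index, a bydst chain and a slot in the
 * policy_count[] array (reported as inscnt/outscnt/fwdscnt by
 * xfrm_spd_getinfo() above).
 */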
int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct net *net = xp_net(pol);
	struct xfrm_policy *old_pol;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
		return -EINVAL;
#endif

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
	if (pol) {
		pol->curlft.add_time = get_seconds();
		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir);
		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
	}
	if (old_pol)
		/* Unlinking always succeeds. This is the only function
		 * allowed to delete or replace a socket policy.
		 */
		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
	write_unlock_bh(&xfrm_policy_lock);

	if (old_pol) {
		xfrm_policy_kill(old_pol);
	}
	return 0;
}

static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);

	if (newp) {
		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old->security,
					       &newp->security)) {
			kfree(newp);
			return NULL;	/* ENOMEM */
		}
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->mark = old->mark;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		newp->type = old->type;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);
		xfrm_pol_put(newp);
	}
	return newp;
}

int __xfrm_sk_clone_policy(struct sock *sk)
{
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
		return -ENOMEM;
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
		return -ENOMEM;
	return 0;
}

static int
xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
	       unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->get_saddr(net, local, remote);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
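/*
 * Template resolution, below: a policy carries up to xfrm_nr templates
 * (xfrm_vec). For tunnel and BEET mode templates the endpoint addresses
 * come from the template itself, with a wildcard local address filled in
 * via xfrm_get_saddr() above; transport mode templates inherit the
 * flow's addresses. Each template is turned into one xfrm_state via
 * xfrm_state_find(), and a missing state is fatal only when the template
 * is not marked optional.
 */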
/* Resolve list of templates for the flow, given policy. */

static int
xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
		      struct xfrm_state **xfrm, unsigned short family)
{
	struct net *net = xp_net(policy);
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
	xfrm_address_t tmp;

	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode == XFRM_MODE_TUNNEL ||
		    tmpl->mode == XFRM_MODE_BEET) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
			if (xfrm_addr_any(local, tmpl->encap_family)) {
				error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
				if (error)
					goto fail;
				local = &tmp;
			}
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		} else if (error == -ESRCH)
			error = -EAGAIN;

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx >= 0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}

static int
xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
		  struct xfrm_state **xfrm, unsigned short family)
{
	struct xfrm_state *tp[XFRM_MAX_DEPTH];
	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
	int cnx = 0;
	int error;
	int ret;
	int i;

	for (i = 0; i < npols; i++) {
		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
			error = -ENOBUFS;
			goto fail;
		}

		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
		if (ret < 0) {
			error = ret;
			goto fail;
		} else
			cnx += ret;
	}

	/* found states are sorted for outbound processing */
	if (npols > 1)
		xfrm_state_sort(xfrm, tpp, cnx, family);

	return cnx;

fail:
	for (cnx--; cnx >= 0; cnx--)
		xfrm_state_put(tpp[cnx]);
	return error;

}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static inline int xfrm_get_tos(const struct flowi *fl, int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int tos;

	if (!afinfo)
		return -EINVAL;

	tos = afinfo->get_tos(fl);

	xfrm_policy_put_afinfo(afinfo);

	return tos;
}
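/*
 * Two kinds of xfrm_dst live in the flow cache. A real bundle has
 * xdst->route set and is validated with stale_bundle(). A dummy bundle
 * (xdst->route == NULL) records a resolution failure: policies existed
 * but usable states did not, so ->get below returns NULL to force a
 * fresh resolution attempt whenever the dummy still carries xfrms.
 */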
static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
{
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	if (xdst->route == NULL) {
		/* Dummy bundle - if it has xfrms we were not able to
		 * build a bundle because template resolution failed.
		 * It means we need to retry resolving. */
		if (xdst->num_xfrms > 0)
			return NULL;
	} else {
		/* Real bundle */
		if (stale_bundle(dst))
			return NULL;
	}

	dst_hold(dst);
	return flo;
}

static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
{
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	if (!xdst->route)
		return 0;
	if (stale_bundle(dst))
		return 0;

	return 1;
}

static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
{
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	dst_free(dst);
}

static const struct flow_cache_ops xfrm_bundle_fc_ops = {
	.get = xfrm_bundle_flo_get,
	.check = xfrm_bundle_flo_check,
	.delete = xfrm_bundle_flo_delete,
};

static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_ops *dst_ops;
	struct xfrm_dst *xdst;

	if (!afinfo)
		return ERR_PTR(-EINVAL);

	switch (family) {
	case AF_INET:
		dst_ops = &net->xfrm.xfrm4_dst_ops;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		dst_ops = &net->xfrm.xfrm6_dst_ops;
		break;
#endif
	default:
		BUG();
	}
	xdst = dst_alloc(dst_ops, NULL, 0, DST_OBSOLETE_NONE, 0);

	if (likely(xdst)) {
		struct dst_entry *dst = &xdst->u.dst;

		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
		xdst->flo.ops = &xfrm_bundle_fc_ops;
	} else
		xdst = ERR_PTR(-ENOBUFS);

	xfrm_policy_put_afinfo(afinfo);

	return xdst;
}

static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
				 int nfheader_len)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(dst->ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->init_path(path, dst, nfheader_len);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}

static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
				const struct flowi *fl)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->fill_dst(xdst, dev, fl);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}
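/*
 * Shape of the object built by xfrm_bundle_create() below, for a
 * two-transform bundle:
 *
 *	dst0 (xfrm[0]) --child--> dst1 (xfrm[1]) --child--> route dst
 *
 * dst0->path points at the plain routing entry at the tail, each
 * xdst->route records the route the level was built over (re-looked-up
 * at tunnel-mode steps), and header_len/trailer_len accumulate per-state
 * overhead so each level knows how much room the transforms below it
 * require.
 */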
/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... Shortly, bundle a bundle.
 */

static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
					    struct xfrm_state **xfrm, int nx,
					    const struct flowi *fl,
					    struct dst_entry *dst)
{
	struct net *net = xp_net(policy);
	unsigned long now = jiffies;
	struct net_device *dev;
	struct xfrm_mode *inner_mode;
	struct dst_entry *dst_prev = NULL;
	struct dst_entry *dst0 = NULL;
	int i = 0;
	int err;
	int header_len = 0;
	int nfheader_len = 0;
	int trailer_len = 0;
	int tos;
	int family = policy->selector.family;
	xfrm_address_t saddr, daddr;

	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);

	tos = xfrm_get_tos(fl, family);
	err = tos;
	if (tos < 0)
		goto put_states;

	dst_hold(dst);

	for (; i < nx; i++) {
		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
		struct dst_entry *dst1 = &xdst->u.dst;

		err = PTR_ERR(xdst);
		if (IS_ERR(xdst)) {
			dst_release(dst);
			goto put_states;
		}

		if (xfrm[i]->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(xfrm[i],
							xfrm_af2proto(family));
			if (!inner_mode) {
				err = -EAFNOSUPPORT;
				dst_release(dst);
				goto put_states;
			}
		} else
			inner_mode = xfrm[i]->inner_mode;

		if (!dst_prev)
			dst0 = dst1;
		else {
			dst_prev->child = dst_clone(dst1);
			dst1->flags |= DST_NOHASH;
		}

		xdst->route = dst;
		dst_copy_metrics(dst1, dst);

		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
			family = xfrm[i]->props.family;
			dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
					      family);
			err = PTR_ERR(dst);
			if (IS_ERR(dst))
				goto put_states;
		} else
			dst_hold(dst);

		dst1->xfrm = xfrm[i];
		xdst->xfrm_genid = xfrm[i]->genid;

		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
		dst1->flags |= DST_HOST;
		dst1->lastuse = now;

		dst1->input = dst_discard;
		dst1->output = inner_mode->afinfo->output;

		dst1->next = dst_prev;
		dst_prev = dst1;

		header_len += xfrm[i]->props.header_len;
		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
			nfheader_len += xfrm[i]->props.header_len;
		trailer_len += xfrm[i]->props.trailer_len;
	}

	dst_prev->child = dst;
	dst0->path = dst;

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
	xfrm_init_pmtu(dst_prev);

	for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;

		err = xfrm_fill_dst(xdst, dev, fl);
		if (err)
			goto free_dst;

		dst_prev->header_len = header_len;
		dst_prev->trailer_len = trailer_len;
		header_len -= xdst->u.dst.xfrm->props.header_len;
		trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
	}

out:
	return dst0;

put_states:
	for (; i < nx; i++)
		xfrm_state_put(xfrm[i]);
free_dst:
	if (dst0)
		dst_free(dst0);
	dst0 = ERR_PTR(err);
	goto out;
}

static inline int
xfrm_dst_alloc_copy(void **target, const void *src, int size)
{
	if (!*target) {
		*target = kmalloc(size, GFP_ATOMIC);
		if (!*target)
			return -ENOMEM;
	}
	memcpy(*target, src, size);
	return 0;
}
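/*
 * The two helpers below stash a copy of the sub-policy selector
 * (xdst->partner) or of the originating flow (xdst->origin) in the
 * bundle; both exist only under CONFIG_XFRM_SUB_POLICY and compile to
 * no-ops otherwise.
 */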
static inline int
xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
				   sel, sizeof(*sel));
#else
	return 0;
#endif
}

static inline int
xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
#else
	return 0;
#endif
}

static int xfrm_expand_policies(const struct flowi *fl, u16 family,
				struct xfrm_policy **pols,
				int *num_pols, int *num_xfrms)
{
	int i;

	if (*num_pols == 0 || !pols[0]) {
		*num_pols = 0;
		*num_xfrms = 0;
		return 0;
	}
	if (IS_ERR(pols[0]))
		return PTR_ERR(pols[0]);

	*num_xfrms = pols[0]->xfrm_nr;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
						    XFRM_POLICY_TYPE_MAIN,
						    fl, family,
						    XFRM_POLICY_OUT);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				xfrm_pols_put(pols, *num_pols);
				return PTR_ERR(pols[1]);
			}
			(*num_pols)++;
			(*num_xfrms) += pols[1]->xfrm_nr;
		}
	}
#endif
	for (i = 0; i < *num_pols; i++) {
		if (pols[i]->action != XFRM_POLICY_ALLOW) {
			*num_xfrms = -1;
			break;
		}
	}

	return 0;

}

static struct xfrm_dst *
xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
			       const struct flowi *fl, u16 family,
			       struct dst_entry *dst_orig)
{
	struct net *net = xp_net(pols[0]);
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst;
	struct xfrm_dst *xdst;
	int err;

	/* Try to instantiate a bundle */
	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
	if (err <= 0) {
		if (err != 0 && err != -EAGAIN)
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
		return ERR_PTR(err);
	}

	dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
	if (IS_ERR(dst)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
		return ERR_CAST(dst);
	}

	xdst = (struct xfrm_dst *)dst;
	xdst->num_xfrms = err;
	if (num_pols > 1)
		err = xfrm_dst_update_parent(dst, &pols[1]->selector);
	else
		err = xfrm_dst_update_origin(dst, fl);
	if (unlikely(err)) {
		dst_free(dst);
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
		return ERR_PTR(err);
	}

	xdst->num_pols = num_pols;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
	xdst->policy_genid = atomic_read(&pols[0]->genid);

	return xdst;
}
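/*
 * xfrm_bundle_lookup() below is the flow cache resolver for bundles.
 * Decision flow, in order: reuse the old bundle's policies unless one of
 * them has died; otherwise re-resolve policies; then either build a real
 * bundle, keep returning the old one while states are still being
 * negotiated (-EAGAIN), or install a dummy bundle so repeated lookups do
 * not hammer the resolver.
 */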
static struct flow_cache_object *
xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
		   struct flow_cache_object *oldflo, void *ctx)
{
	struct dst_entry *dst_orig = (struct dst_entry *)ctx;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	struct xfrm_dst *xdst, *new_xdst;
	int num_pols = 0, num_xfrms = 0, i, err, pol_dead;

	/* Check if the policies from old bundle are usable */
	xdst = NULL;
	if (oldflo) {
		xdst = container_of(oldflo, struct xfrm_dst, flo);
		num_pols = xdst->num_pols;
		num_xfrms = xdst->num_xfrms;
		pol_dead = 0;
		for (i = 0; i < num_pols; i++) {
			pols[i] = xdst->pols[i];
			pol_dead |= pols[i]->walk.dead;
		}
		if (pol_dead) {
			dst_free(&xdst->u.dst);
			xdst = NULL;
			num_pols = 0;
			num_xfrms = 0;
			oldflo = NULL;
		}
	}

	/* Resolve policies to use if we couldn't get them from
	 * previous cache entry */
	if (xdst == NULL) {
		num_pols = 1;
		pols[0] = __xfrm_policy_lookup(net, fl, family, dir);
		err = xfrm_expand_policies(fl, family, pols,
					   &num_pols, &num_xfrms);
		if (err < 0)
			goto inc_error;
		if (num_pols == 0)
			return NULL;
		if (num_xfrms <= 0)
			goto make_dummy_bundle;
	}

	new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
	if (IS_ERR(new_xdst)) {
		err = PTR_ERR(new_xdst);
		if (err != -EAGAIN)
			goto error;
		if (oldflo == NULL)
			goto make_dummy_bundle;
		dst_hold(&xdst->u.dst);
		return oldflo;
	} else if (new_xdst == NULL) {
		num_xfrms = 0;
		if (oldflo == NULL)
			goto make_dummy_bundle;
		xdst->num_xfrms = 0;
		dst_hold(&xdst->u.dst);
		return oldflo;
	}

	/* Kill the previous bundle */
	if (xdst) {
		/* The policies were stolen for newly generated bundle */
		xdst->num_pols = 0;
		dst_free(&xdst->u.dst);
	}

	/* Flow cache does not have reference, it dst_free()'s,
	 * but we do need to return one reference for original caller */
	dst_hold(&new_xdst->u.dst);
	return &new_xdst->flo;

make_dummy_bundle:
	/* We found policies, but there are no bundles to instantiate:
	 * either because the policy blocks, has no transformations or
	 * we could not build a template (no xfrm_states). */
	xdst = xfrm_alloc_dst(net, family);
	if (IS_ERR(xdst)) {
		xfrm_pols_put(pols, num_pols);
		return ERR_CAST(xdst);
	}
	xdst->num_pols = num_pols;
	xdst->num_xfrms = num_xfrms;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);

	dst_hold(&xdst->u.dst);
	return &xdst->flo;

inc_error:
	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
error:
	if (xdst != NULL)
		dst_free(&xdst->u.dst);
	else
		xfrm_pols_put(pols, num_pols);
	return ERR_PTR(err);
}

static struct dst_entry *make_blackhole(struct net *net, u16 family,
					struct dst_entry *dst_orig)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_entry *ret;

	if (!afinfo) {
		dst_release(dst_orig);
		ret = ERR_PTR(-EINVAL);
	} else {
		ret = afinfo->blackhole_route(net, dst_orig);
	}
	xfrm_policy_put_afinfo(afinfo);

	return ret;
}
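/*
 * Typical xfrm_lookup() call, as the forwarding path does it further
 * below in __xfrm_route_forward(): hand in the plain route and get back
 * either the same dst (no policy applied), a transformed bundle, an
 * error pointer, or a blackhole route while SAs are still being
 * negotiated (with the larval_drop sysctl enabled):
 *
 *	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
 *	if (IS_ERR(dst))
 *		dst = NULL;	-- treat the packet as unroutable
 */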
/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
			      const struct flowi *fl,
			      struct sock *sk, int flags)
{
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	struct flow_cache_object *flo;
	struct xfrm_dst *xdst;
	struct dst_entry *dst, *route;
	u16 family = dst_orig->ops->family;
	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;

restart:
	dst = NULL;
	xdst = NULL;
	route = NULL;

	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
		num_pols = 1;
		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
		err = xfrm_expand_policies(fl, family, pols,
					   &num_pols, &num_xfrms);
		if (err < 0)
			goto dropdst;

		if (num_pols) {
			if (num_xfrms <= 0) {
				drop_pols = num_pols;
				goto no_transform;
			}

			xdst = xfrm_resolve_and_create_bundle(
					pols, num_pols, fl,
					family, dst_orig);
			if (IS_ERR(xdst)) {
				xfrm_pols_put(pols, num_pols);
				err = PTR_ERR(xdst);
				goto dropdst;
			} else if (xdst == NULL) {
				num_xfrms = 0;
				drop_pols = num_pols;
				goto no_transform;
			}

			dst_hold(&xdst->u.dst);

			spin_lock_bh(&xfrm_policy_sk_bundle_lock);
			xdst->u.dst.next = xfrm_policy_sk_bundles;
			xfrm_policy_sk_bundles = &xdst->u.dst;
			spin_unlock_bh(&xfrm_policy_sk_bundle_lock);

			route = xdst->route;
		}
	}

	if (xdst == NULL) {
		/* To accelerate a bit... */
		if ((dst_orig->flags & DST_NOXFRM) ||
		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
			goto nopol;

		flo = flow_cache_lookup(net, fl, family, dir,
					xfrm_bundle_lookup, dst_orig);
		if (flo == NULL)
			goto nopol;
		if (IS_ERR(flo)) {
			err = PTR_ERR(flo);
			goto dropdst;
		}
		xdst = container_of(flo, struct xfrm_dst, flo);

		num_pols = xdst->num_pols;
		num_xfrms = xdst->num_xfrms;
		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
		route = xdst->route;
	}

	dst = &xdst->u.dst;
	if (route == NULL && num_xfrms > 0) {
		/* The only case when xfrm_bundle_lookup() returns a
		 * bundle with null route is when the template could
		 * not be resolved. It means policies are there, but
		 * bundle could not be created, since we don't yet
		 * have the xfrm_state's. We need to wait for KM to
		 * negotiate new SA's or bail out with error. */
		if (net->xfrm.sysctl_larval_drop) {
			/* EREMOTE tells the caller to generate
			 * a one-shot blackhole route.
			 */
			dst_release(dst);
			xfrm_pols_put(pols, drop_pols);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);

			return make_blackhole(net, family, dst_orig);
		}
		if (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP) {
			DECLARE_WAITQUEUE(wait, current);

			add_wait_queue(&net->xfrm.km_waitq, &wait);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&net->xfrm.km_waitq, &wait);

			if (!signal_pending(current)) {
				dst_release(dst);
				goto restart;
			}

			err = -ERESTART;
		} else
			err = -EAGAIN;

		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
		goto error;
	}

no_transform:
	if (num_pols == 0)
		goto nopol;

	if ((flags & XFRM_LOOKUP_ICMP) &&
	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
		err = -ENOENT;
		goto error;
	}

	for (i = 0; i < num_pols; i++)
		pols[i]->curlft.use_time = get_seconds();

	if (num_xfrms < 0) {
		/* Prohibit the flow */
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
		err = -EPERM;
		goto error;
	} else if (num_xfrms > 0) {
		/* Flow transformed */
		dst_release(dst_orig);
	} else {
		/* Flow passes untransformed */
		dst_release(dst);
		dst = dst_orig;
	}
ok:
	xfrm_pols_put(pols, drop_pols);
	if (dst && dst->xfrm &&
	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
		dst->flags |= DST_XFRM_TUNNEL;
	return dst;

nopol:
	if (!(flags & XFRM_LOOKUP_ICMP)) {
		dst = dst_orig;
		goto ok;
	}
	err = -ENOENT;
error:
	dst_release(dst);
dropdst:
	dst_release(dst_orig);
	xfrm_pols_put(pols, drop_pols);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(xfrm_lookup);

static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
{
	struct xfrm_state *x;

	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
		return 0;
	x = skb->sp->xvec[idx];
	if (!x->type->reject)
		return 0;
	return x->type->reject(x, skb, fl);
}

/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we make this in maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have policy cached at them.
 */

static inline int
xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
		!(x->props.mode != XFRM_MODE_TRANSPORT &&
		  xfrm_state_addr_cmp(tmpl, x, family));
}
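/*
 * An illustrative reading of the return convention of xfrm_policy_ok()
 * below: with sp->len == 2 and a required transport-mode template that
 * matches sp->xvec[0], the call returns 1 (the next index to scan); if
 * nothing matches, -1; and if the scan stops at a non-transport state at
 * index idx before any match, -2 - idx, from which the caller recovers
 * the errored index as -(2 + k).
 */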
/*
 * 0 or more than 0 is returned when validation succeeds (either a bypass
 * because of an optional transport-mode template, or the next index of
 * the secpath state matched against the template).
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
static inline int
xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			if (start == -1)
				start = -2-idx;
			break;
		}
	}
	return start;
}

int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
			  unsigned int family, int reverse)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl, reverse);
	err = security_xfrm_decode_session(skb, &fl->flowi_secid);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(__xfrm_decode_session);

static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
			*idxp = k;
			return 1;
		}
	}

	return 0;
}
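/*
 * Inbound policy check, below. The algorithm: (1) decode the flow from
 * the packet; (2) verify every SA in the secpath against its own
 * selector; (3) find the applicable policy (socket policy first, then
 * the flow cache); (4) for an ALLOW policy, collect the templates of all
 * matched policies and verify that the secpath satisfies them in order,
 * rejecting on any mismatch.
 */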
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct xfrm_policy *pol;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols = 0;
	int xfrm_nr;
	int pi;
	int reverse;
	struct flowi fl;
	u8 fl_dir;
	int xerr_idx = -1;

	reverse = dir & ~XFRM_POLICY_MASK;
	dir &= XFRM_POLICY_MASK;
	fl_dir = policy_to_flow_dir(dir);

	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		return 0;
	}

	nf_nat_decode_session(skb, &fl, family);

	/* First, check used SA against their selectors. */
	if (skb->sp) {
		int i;

		for (i = skb->sp->len-1; i >= 0; i--) {
			struct xfrm_state *x = skb->sp->xvec[i];
			if (!xfrm_selector_match(&x->sel, &fl, family)) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
				return 0;
			}
		}
	}

	pol = NULL;
	if (sk && sk->sk_policy[dir]) {
		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
		if (IS_ERR(pol)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
			return 0;
		}
	}

	if (!pol) {
		struct flow_cache_object *flo;

		flo = flow_cache_lookup(net, &fl, family, fl_dir,
					xfrm_policy_lookup, NULL);
		if (IS_ERR_OR_NULL(flo))
			pol = ERR_CAST(flo);
		else
			pol = container_of(flo, struct xfrm_policy, flo);
	}

	if (IS_ERR(pol)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
		return 0;
	}

	if (!pol) {
		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
			xfrm_secpath_reject(xerr_idx, skb, &fl);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
			return 0;
		}
		return 1;
	}

	pol->curlft.use_time = get_seconds();

	pols[0] = pol;
	npols++;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
						    &fl, family,
						    XFRM_POLICY_IN);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
				return 0;
			}
			pols[1]->curlft.use_time = get_seconds();
			npols++;
		}
	}
#endif

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl **tpp = tp;
		int ti = 0;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;

		for (pi = 0; pi < npols; pi++) {
			if (pols[pi] != pol &&
			    pols[pi]->action != XFRM_POLICY_ALLOW) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
				goto reject;
			}
			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
				goto reject_error;
			}
			for (i = 0; i < pols[pi]->xfrm_nr; i++)
				tpp[ti++] = &pols[pi]->xfrm_vec[i];
		}
		xfrm_nr = ti;
		if (npols > 1) {
			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family);
			tpp = stp;
		}

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		for (i = xfrm_nr - 1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family);
			if (k < 0) {
				if (k < -1)
					/* "-2 - errored_index" returned */
					xerr_idx = -(2+k);
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
				goto reject;
			}
		}

		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
			goto reject;
		}

		xfrm_pols_put(pols, npols);
		return 1;
	}
	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);

reject:
	xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
	xfrm_pols_put(pols, npols);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);

int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct flowi fl;
	struct dst_entry *dst;
	int res = 1;

	if (xfrm_decode_session(skb, &fl, family) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
		return 0;
	}

	skb_dst_force(skb);

	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
	if (IS_ERR(dst)) {
		res = 0;
		dst = NULL;
	}
	skb_dst_set(skb, dst);
	return res;
}
EXPORT_SYMBOL(__xfrm_route_forward);

/* Optimize later using cookies and generation ids. */

static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
	 * get validated by dst_ops->check on every use.  We do this
	 * because when a normal route referenced by an XFRM dst is
	 * obsoleted we do not go looking around for all parent
	 * referencing XFRM dsts so that we can invalidate them.  It
	 * is just too much work.  Instead we make the checks here on
	 * every use.  For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example).  If X is marked obsolete, "A" will not
	 * notice.  That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a policy's bundle is pruned, we dst_free() the XFRM
	 * dst which causes its ->obsolete field to be set to
	 * DST_OBSOLETE_DEAD.  If an XFRM dst has been pruned like
	 * this, we want to force a new route lookup.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
}

void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = dev_net(dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible.  Such a dst must be popped before it reaches the
	 * point of failure.
	 */
}

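/* Typically reached via dst_negative_advice() when a socket suspects
 * its cached route has gone bad.  Dropping an obsolete bundle here
 * makes the caller fall back to a fresh lookup on its next transmit.
 */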
static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst && dst->obsolete) {
		dst_release(dst);
		dst = NULL;
	}
	return dst;
}

static void __xfrm_garbage_collect(struct net *net)
{
	struct dst_entry *head, *next;

	spin_lock_bh(&xfrm_policy_sk_bundle_lock);
	head = xfrm_policy_sk_bundles;
	xfrm_policy_sk_bundles = NULL;
	spin_unlock_bh(&xfrm_policy_sk_bundle_lock);

	while (head) {
		next = head->next;
		dst_free(head);
		head = next;
	}
}

static void xfrm_garbage_collect(struct net *net)
{
	flow_cache_flush();
	__xfrm_garbage_collect(net);
}

static void xfrm_garbage_collect_deferred(struct net *net)
{
	flow_cache_flush_deferred();
	__xfrm_garbage_collect(net);
}

static void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst_metric_set(dst, RTAX_MTU, pmtu);
	} while ((dst = dst->next));
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */
static int xfrm_bundle_ok(struct xfrm_dst *first)
{
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;
	u32 mtu;

	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;

	last = NULL;

	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->xfrm_genid != dst->xfrm->genid)
			return 0;
		if (xdst->num_pols > 0 &&
		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
			return 0;

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			last = xdst;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			last = xdst;
			xdst->route_mtu_cached = mtu;
		}

		dst = dst->child;
	} while (dst->xfrm);

	if (likely(!last))
		return 1;

	mtu = last->child_mtu_cached;
	for (;;) {
		dst = &last->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst_metric_set(dst, RTAX_MTU, mtu);

		if (last == first)
			break;

		last = (struct xfrm_dst *)last->u.dst.next;
		last->child_mtu_cached = mtu;
	}

	return 1;
}

static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
{
	return dst_metric_advmss(dst->path);
}

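/* Report the bundle MTU cached by xfrm_init_pmtu()/xfrm_bundle_ok();
 * if no cached value has been set yet, fall back to the MTU of the
 * underlying path route.
 */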
static unsigned int xfrm_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst_mtu(dst->path);
}

static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	return dst->path->ops->neigh_lookup(dst, skb, daddr);
}

int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	struct net *net;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;

	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;

		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->default_advmss == NULL))
			dst_ops->default_advmss = xfrm_default_advmss;
		if (likely(dst_ops->mtu == NULL))
			dst_ops->mtu = xfrm_mtu;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(dst_ops->neigh_lookup == NULL))
			dst_ops->neigh_lookup = xfrm_neigh_lookup;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = xfrm_garbage_collect_deferred;
		xfrm_policy_afinfo[afinfo->family] = afinfo;
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);

	rtnl_lock();
	for_each_net(net) {
		struct dst_ops *xfrm_dst_ops;

		switch (afinfo->family) {
		case AF_INET:
			xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case AF_INET6:
			xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
			break;
#endif
		default:
			BUG();
		}
		*xfrm_dst_ops = *afinfo->dst_ops;
	}
	rtnl_unlock();

	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);

int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;

	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else {
			struct dst_ops *dst_ops = afinfo->dst_ops;

			xfrm_policy_afinfo[afinfo->family] = NULL;
			dst_ops->kmem_cachep = NULL;
			dst_ops->check = NULL;
			dst_ops->negative_advice = NULL;
			dst_ops->link_failure = NULL;
			afinfo->garbage_collect = NULL;
		}
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

static void __net_init xfrm_dst_ops_init(struct net *net)
{
	struct xfrm_policy_afinfo *afinfo;

	read_lock_bh(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[AF_INET];
	if (afinfo)
		net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
#if IS_ENABLED(CONFIG_IPV6)
	afinfo = xfrm_policy_afinfo[AF_INET6];
	if (afinfo)
		net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
#endif
	read_unlock_bh(&xfrm_policy_afinfo_lock);
}

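/* On success, xfrm_policy_get_afinfo() returns with
 * xfrm_policy_afinfo_lock read-held; the caller must drop it via
 * xfrm_policy_put_afinfo().  In the failure cases (family out of
 * range, or no afinfo registered) NULL is returned and the lock is
 * not held.
 */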
static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_policy_afinfo_lock);
	return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	read_unlock(&xfrm_policy_afinfo_lock);
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_DOWN:
		xfrm_garbage_collect(dev_net(dev));
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call = xfrm_dev_event,
};

#ifdef CONFIG_XFRM_STATISTICS
static int __net_init xfrm_statistics_init(struct net *net)
{
	int rv;

	if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
			  sizeof(struct linux_xfrm_mib),
			  __alignof__(struct linux_xfrm_mib)) < 0)
		return -ENOMEM;
	rv = xfrm_proc_init(net);
	if (rv < 0)
		snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
	return rv;
}

static void xfrm_statistics_fini(struct net *net)
{
	xfrm_proc_fini(net);
	snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
}
#else
static int __net_init xfrm_statistics_init(struct net *net)
{
	return 0;
}

static void xfrm_statistics_fini(struct net *net)
{
}
#endif

static int __net_init xfrm_policy_init(struct net *net)
{
	unsigned int hmask, sz;
	int dir;

	if (net_eq(net, &init_net))
		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
						   sizeof(struct xfrm_dst),
						   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
						   NULL);

	hmask = 8 - 1;
	sz = (hmask + 1) * sizeof(struct hlist_head);

	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
	if (!net->xfrm.policy_byidx)
		goto out_byidx;
	net->xfrm.policy_idx_hmask = hmask;

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		net->xfrm.policy_count[dir] = 0;
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);

		htab = &net->xfrm.policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		if (!htab->table)
			goto out_bydst;
		htab->hmask = hmask;
	}

	INIT_LIST_HEAD(&net->xfrm.policy_all);
	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
	if (net_eq(net, &init_net))
		register_netdevice_notifier(&xfrm_dev_notifier);
	return 0;

out_bydst:
	for (dir--; dir >= 0; dir--) {
		struct xfrm_policy_hash *htab;

		htab = &net->xfrm.policy_bydst[dir];
		xfrm_hash_free(htab->table, sz);
	}
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
out_byidx:
	return -ENOMEM;
}

static void xfrm_policy_fini(struct net *net)
{
	struct xfrm_audit audit_info;
	unsigned int sz;
	int dir;

	flush_work(&net->xfrm.policy_hash_work);
#ifdef CONFIG_XFRM_SUB_POLICY
	audit_info.loginuid = -1;
	audit_info.sessionid = -1;
	audit_info.secid = 0;
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info);
#endif
	audit_info.loginuid = -1;
	audit_info.sessionid = -1;
	audit_info.secid = 0;
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);

	WARN_ON(!list_empty(&net->xfrm.policy_all));

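	/* All policies were flushed above, so every per-direction hash
	 * chain must be empty by now; warn if that invariant does not
	 * hold, then free the tables.
	 */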
	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));

		htab = &net->xfrm.policy_bydst[dir];
		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
		WARN_ON(!hlist_empty(htab->table));
		xfrm_hash_free(htab->table, sz);
	}

	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
}

static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	rv = xfrm_statistics_init(net);
	if (rv < 0)
		goto out_statistics;
	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	xfrm_dst_ops_init(net);
	rv = xfrm_sysctl_init(net);
	if (rv < 0)
		goto out_sysctl;
	return 0;

out_sysctl:
	xfrm_policy_fini(net);
out_policy:
	xfrm_state_fini(net);
out_state:
	xfrm_statistics_fini(net);
out_statistics:
	return rv;
}

static void __net_exit xfrm_net_exit(struct net *net)
{
	xfrm_sysctl_fini(net);
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
	xfrm_statistics_fini(net);
}

static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};

void __init xfrm_init(void)
{
	register_pernet_subsys(&xfrm_net_ops);
	xfrm_input_init();
}

#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}

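/* By convention, callers of the SPD-add/SPD-delete helpers below pass
 * result=1 for a successful operation and result=0 for a failed one;
 * the record ends with the selector details formatted above.
 */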
void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
			   uid_t auid, u32 sessionid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      uid_t auid, u32 sessionid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif

#ifdef CONFIG_XFRM_MIGRATE
static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
					const struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
				  sel_cmp->family) == 0 &&
		    xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
				  sel_cmp->family) == 0 &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return true;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return true;
		}
	}
	return false;
}

static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
						    u8 dir, u8 type)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &init_net.xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type &&
		    pol->priority < priority) {
			ret = pol;
			break;
		}
	}

	if (ret)
		xfrm_pol_hold(ret);

	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}

static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
					  m->old_family) == 0 &&
			    xfrm_addr_cmp(&t->saddr, &m->old_saddr,
					  m->old_family) == 0) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* In transport mode the template stores no IP
			 * addresses, so comparing mode and protocol is
			 * sufficient.
			 */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}

/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
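			/* Bundles instantiated from this policy cache
			 * pols[0]->genid (checked in xfrm_bundle_ok()), so
			 * bumping the genid below invalidates them and
			 * forces a relookup with the updated endpoints.
			 */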
			/* flush bundles */
			atomic_inc(&pol->genid);
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}

static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr,
				   m[i].old_family) == 0) &&
		    (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr,
				   m[i].old_family) == 0))
			return -EINVAL;
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}

int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			if ((xc = xfrm_state_migrate(x, mp))) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k);

	xfrm_pol_put(pol);

	return 0;
out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif