1 /* 2 * xfrm_policy.c 3 * 4 * Changes: 5 * Mitsuru KANDA @USAGI 6 * Kazunori MIYAZAWA @USAGI 7 * Kunihiro Ishiguro <kunihiro@ipinfusion.com> 8 * IPv6 support 9 * Kazunori MIYAZAWA @USAGI 10 * YOSHIFUJI Hideaki 11 * Split up af-specific portion 12 * Derek Atkins <derek@ihtfp.com> Add the post_input processor 13 * 14 */ 15 16 #include <linux/err.h> 17 #include <linux/slab.h> 18 #include <linux/kmod.h> 19 #include <linux/list.h> 20 #include <linux/spinlock.h> 21 #include <linux/workqueue.h> 22 #include <linux/notifier.h> 23 #include <linux/netdevice.h> 24 #include <linux/netfilter.h> 25 #include <linux/module.h> 26 #include <linux/cache.h> 27 #include <linux/audit.h> 28 #include <net/dst.h> 29 #include <net/flow.h> 30 #include <net/xfrm.h> 31 #include <net/ip.h> 32 #ifdef CONFIG_XFRM_STATISTICS 33 #include <net/snmp.h> 34 #endif 35 36 #include "xfrm_hash.h" 37 38 DEFINE_MUTEX(xfrm_cfg_mutex); 39 EXPORT_SYMBOL(xfrm_cfg_mutex); 40 41 static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock); 42 static struct dst_entry *xfrm_policy_sk_bundles; 43 static DEFINE_RWLOCK(xfrm_policy_lock); 44 45 static DEFINE_RWLOCK(xfrm_policy_afinfo_lock); 46 static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO]; 47 48 static struct kmem_cache *xfrm_dst_cache __read_mostly; 49 50 static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family); 51 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo); 52 static void xfrm_init_pmtu(struct dst_entry *dst); 53 static int stale_bundle(struct dst_entry *dst); 54 static int xfrm_bundle_ok(struct xfrm_dst *xdst); 55 56 57 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, 58 int dir); 59 60 static inline bool 61 __xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl) 62 { 63 const struct flowi4 *fl4 = &fl->u.ip4; 64 65 return addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) && 66 addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) && 67 !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) && 68 !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) && 69 (fl4->flowi4_proto == sel->proto || !sel->proto) && 70 (fl4->flowi4_oif == sel->ifindex || !sel->ifindex); 71 } 72 73 static inline bool 74 __xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl) 75 { 76 const struct flowi6 *fl6 = &fl->u.ip6; 77 78 return addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) && 79 addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) && 80 !((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) && 81 !((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) && 82 (fl6->flowi6_proto == sel->proto || !sel->proto) && 83 (fl6->flowi6_oif == sel->ifindex || !sel->ifindex); 84 } 85 86 bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl, 87 unsigned short family) 88 { 89 switch (family) { 90 case AF_INET: 91 return __xfrm4_selector_match(sel, fl); 92 case AF_INET6: 93 return __xfrm6_selector_match(sel, fl); 94 } 95 return false; 96 } 97 98 static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, 99 const xfrm_address_t *saddr, 100 const xfrm_address_t *daddr, 101 int family) 102 { 103 struct xfrm_policy_afinfo *afinfo; 104 struct dst_entry *dst; 105 106 afinfo = xfrm_policy_get_afinfo(family); 107 if (unlikely(afinfo == NULL)) 108 return ERR_PTR(-EAFNOSUPPORT); 109 110 dst = afinfo->dst_lookup(net, tos, saddr, daddr); 111 112 xfrm_policy_put_afinfo(afinfo); 113 114 return dst; 115 } 116 117 static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos, 118 xfrm_address_t *prev_saddr, 119 xfrm_address_t *prev_daddr, 120 int family) 121 { 122 struct net *net = xs_net(x); 123 xfrm_address_t *saddr = &x->props.saddr; 124 xfrm_address_t *daddr = &x->id.daddr; 125 struct dst_entry *dst; 126 127 if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) { 128 saddr = x->coaddr; 129 daddr = prev_daddr; 130 } 131 if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) { 132 saddr = prev_saddr; 133 daddr = x->coaddr; 134 } 135 136 dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family); 137 138 if (!IS_ERR(dst)) { 139 if (prev_saddr != saddr) 140 memcpy(prev_saddr, saddr, sizeof(*prev_saddr)); 141 if (prev_daddr != daddr) 142 memcpy(prev_daddr, daddr, sizeof(*prev_daddr)); 143 } 144 145 return dst; 146 } 147 148 static inline unsigned long make_jiffies(long secs) 149 { 150 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ) 151 return MAX_SCHEDULE_TIMEOUT-1; 152 else 153 return secs*HZ; 154 } 155 156 static void xfrm_policy_timer(unsigned long data) 157 { 158 struct xfrm_policy *xp = (struct xfrm_policy*)data; 159 unsigned long now = get_seconds(); 160 long next = LONG_MAX; 161 int warn = 0; 162 int dir; 163 164 read_lock(&xp->lock); 165 166 if (unlikely(xp->walk.dead)) 167 goto out; 168 169 dir = xfrm_policy_id2dir(xp->index); 170 171 if (xp->lft.hard_add_expires_seconds) { 172 long tmo = xp->lft.hard_add_expires_seconds + 173 xp->curlft.add_time - now; 174 if (tmo <= 0) 175 goto expired; 176 if (tmo < next) 177 next = tmo; 178 } 179 if (xp->lft.hard_use_expires_seconds) { 180 long tmo = xp->lft.hard_use_expires_seconds + 181 (xp->curlft.use_time ? : xp->curlft.add_time) - now; 182 if (tmo <= 0) 183 goto expired; 184 if (tmo < next) 185 next = tmo; 186 } 187 if (xp->lft.soft_add_expires_seconds) { 188 long tmo = xp->lft.soft_add_expires_seconds + 189 xp->curlft.add_time - now; 190 if (tmo <= 0) { 191 warn = 1; 192 tmo = XFRM_KM_TIMEOUT; 193 } 194 if (tmo < next) 195 next = tmo; 196 } 197 if (xp->lft.soft_use_expires_seconds) { 198 long tmo = xp->lft.soft_use_expires_seconds + 199 (xp->curlft.use_time ? : xp->curlft.add_time) - now; 200 if (tmo <= 0) { 201 warn = 1; 202 tmo = XFRM_KM_TIMEOUT; 203 } 204 if (tmo < next) 205 next = tmo; 206 } 207 208 if (warn) 209 km_policy_expired(xp, dir, 0, 0); 210 if (next != LONG_MAX && 211 !mod_timer(&xp->timer, jiffies + make_jiffies(next))) 212 xfrm_pol_hold(xp); 213 214 out: 215 read_unlock(&xp->lock); 216 xfrm_pol_put(xp); 217 return; 218 219 expired: 220 read_unlock(&xp->lock); 221 if (!xfrm_policy_delete(xp, dir)) 222 km_policy_expired(xp, dir, 1, 0); 223 xfrm_pol_put(xp); 224 } 225 226 static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo) 227 { 228 struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo); 229 230 if (unlikely(pol->walk.dead)) 231 flo = NULL; 232 else 233 xfrm_pol_hold(pol); 234 235 return flo; 236 } 237 238 static int xfrm_policy_flo_check(struct flow_cache_object *flo) 239 { 240 struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo); 241 242 return !pol->walk.dead; 243 } 244 245 static void xfrm_policy_flo_delete(struct flow_cache_object *flo) 246 { 247 xfrm_pol_put(container_of(flo, struct xfrm_policy, flo)); 248 } 249 250 static const struct flow_cache_ops xfrm_policy_fc_ops = { 251 .get = xfrm_policy_flo_get, 252 .check = xfrm_policy_flo_check, 253 .delete = xfrm_policy_flo_delete, 254 }; 255 256 /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2 257 * SPD calls. 258 */ 259 260 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp) 261 { 262 struct xfrm_policy *policy; 263 264 policy = kzalloc(sizeof(struct xfrm_policy), gfp); 265 266 if (policy) { 267 write_pnet(&policy->xp_net, net); 268 INIT_LIST_HEAD(&policy->walk.all); 269 INIT_HLIST_NODE(&policy->bydst); 270 INIT_HLIST_NODE(&policy->byidx); 271 rwlock_init(&policy->lock); 272 atomic_set(&policy->refcnt, 1); 273 setup_timer(&policy->timer, xfrm_policy_timer, 274 (unsigned long)policy); 275 policy->flo.ops = &xfrm_policy_fc_ops; 276 } 277 return policy; 278 } 279 EXPORT_SYMBOL(xfrm_policy_alloc); 280 281 /* Destroy xfrm_policy: descendant resources must be released to this moment. */ 282 283 void xfrm_policy_destroy(struct xfrm_policy *policy) 284 { 285 BUG_ON(!policy->walk.dead); 286 287 if (del_timer(&policy->timer)) 288 BUG(); 289 290 security_xfrm_policy_free(policy->security); 291 kfree(policy); 292 } 293 EXPORT_SYMBOL(xfrm_policy_destroy); 294 295 /* Rule must be locked. Release descentant resources, announce 296 * entry dead. The rule must be unlinked from lists to the moment. 297 */ 298 299 static void xfrm_policy_kill(struct xfrm_policy *policy) 300 { 301 policy->walk.dead = 1; 302 303 atomic_inc(&policy->genid); 304 305 if (del_timer(&policy->timer)) 306 xfrm_pol_put(policy); 307 308 xfrm_pol_put(policy); 309 } 310 311 static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024; 312 313 static inline unsigned int idx_hash(struct net *net, u32 index) 314 { 315 return __idx_hash(index, net->xfrm.policy_idx_hmask); 316 } 317 318 static struct hlist_head *policy_hash_bysel(struct net *net, 319 const struct xfrm_selector *sel, 320 unsigned short family, int dir) 321 { 322 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; 323 unsigned int hash = __sel_hash(sel, family, hmask); 324 325 return (hash == hmask + 1 ? 326 &net->xfrm.policy_inexact[dir] : 327 net->xfrm.policy_bydst[dir].table + hash); 328 } 329 330 static struct hlist_head *policy_hash_direct(struct net *net, 331 const xfrm_address_t *daddr, 332 const xfrm_address_t *saddr, 333 unsigned short family, int dir) 334 { 335 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; 336 unsigned int hash = __addr_hash(daddr, saddr, family, hmask); 337 338 return net->xfrm.policy_bydst[dir].table + hash; 339 } 340 341 static void xfrm_dst_hash_transfer(struct hlist_head *list, 342 struct hlist_head *ndsttable, 343 unsigned int nhashmask) 344 { 345 struct hlist_node *entry, *tmp, *entry0 = NULL; 346 struct xfrm_policy *pol; 347 unsigned int h0 = 0; 348 349 redo: 350 hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) { 351 unsigned int h; 352 353 h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr, 354 pol->family, nhashmask); 355 if (!entry0) { 356 hlist_del(entry); 357 hlist_add_head(&pol->bydst, ndsttable+h); 358 h0 = h; 359 } else { 360 if (h != h0) 361 continue; 362 hlist_del(entry); 363 hlist_add_after(entry0, &pol->bydst); 364 } 365 entry0 = entry; 366 } 367 if (!hlist_empty(list)) { 368 entry0 = NULL; 369 goto redo; 370 } 371 } 372 373 static void xfrm_idx_hash_transfer(struct hlist_head *list, 374 struct hlist_head *nidxtable, 375 unsigned int nhashmask) 376 { 377 struct hlist_node *entry, *tmp; 378 struct xfrm_policy *pol; 379 380 hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) { 381 unsigned int h; 382 383 h = __idx_hash(pol->index, nhashmask); 384 hlist_add_head(&pol->byidx, nidxtable+h); 385 } 386 } 387 388 static unsigned long xfrm_new_hash_mask(unsigned int old_hmask) 389 { 390 return ((old_hmask + 1) << 1) - 1; 391 } 392 393 static void xfrm_bydst_resize(struct net *net, int dir) 394 { 395 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; 396 unsigned int nhashmask = xfrm_new_hash_mask(hmask); 397 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head); 398 struct hlist_head *odst = net->xfrm.policy_bydst[dir].table; 399 struct hlist_head *ndst = xfrm_hash_alloc(nsize); 400 int i; 401 402 if (!ndst) 403 return; 404 405 write_lock_bh(&xfrm_policy_lock); 406 407 for (i = hmask; i >= 0; i--) 408 xfrm_dst_hash_transfer(odst + i, ndst, nhashmask); 409 410 net->xfrm.policy_bydst[dir].table = ndst; 411 net->xfrm.policy_bydst[dir].hmask = nhashmask; 412 413 write_unlock_bh(&xfrm_policy_lock); 414 415 xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head)); 416 } 417 418 static void xfrm_byidx_resize(struct net *net, int total) 419 { 420 unsigned int hmask = net->xfrm.policy_idx_hmask; 421 unsigned int nhashmask = xfrm_new_hash_mask(hmask); 422 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head); 423 struct hlist_head *oidx = net->xfrm.policy_byidx; 424 struct hlist_head *nidx = xfrm_hash_alloc(nsize); 425 int i; 426 427 if (!nidx) 428 return; 429 430 write_lock_bh(&xfrm_policy_lock); 431 432 for (i = hmask; i >= 0; i--) 433 xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask); 434 435 net->xfrm.policy_byidx = nidx; 436 net->xfrm.policy_idx_hmask = nhashmask; 437 438 write_unlock_bh(&xfrm_policy_lock); 439 440 xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head)); 441 } 442 443 static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total) 444 { 445 unsigned int cnt = net->xfrm.policy_count[dir]; 446 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; 447 448 if (total) 449 *total += cnt; 450 451 if ((hmask + 1) < xfrm_policy_hashmax && 452 cnt > hmask) 453 return 1; 454 455 return 0; 456 } 457 458 static inline int xfrm_byidx_should_resize(struct net *net, int total) 459 { 460 unsigned int hmask = net->xfrm.policy_idx_hmask; 461 462 if ((hmask + 1) < xfrm_policy_hashmax && 463 total > hmask) 464 return 1; 465 466 return 0; 467 } 468 469 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si) 470 { 471 read_lock_bh(&xfrm_policy_lock); 472 si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN]; 473 si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT]; 474 si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD]; 475 si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX]; 476 si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX]; 477 si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX]; 478 si->spdhcnt = net->xfrm.policy_idx_hmask; 479 si->spdhmcnt = xfrm_policy_hashmax; 480 read_unlock_bh(&xfrm_policy_lock); 481 } 482 EXPORT_SYMBOL(xfrm_spd_getinfo); 483 484 static DEFINE_MUTEX(hash_resize_mutex); 485 static void xfrm_hash_resize(struct work_struct *work) 486 { 487 struct net *net = container_of(work, struct net, xfrm.policy_hash_work); 488 int dir, total; 489 490 mutex_lock(&hash_resize_mutex); 491 492 total = 0; 493 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) { 494 if (xfrm_bydst_should_resize(net, dir, &total)) 495 xfrm_bydst_resize(net, dir); 496 } 497 if (xfrm_byidx_should_resize(net, total)) 498 xfrm_byidx_resize(net, total); 499 500 mutex_unlock(&hash_resize_mutex); 501 } 502 503 /* Generate new index... KAME seems to generate them ordered by cost 504 * of an absolute inpredictability of ordering of rules. This will not pass. */ 505 static u32 xfrm_gen_index(struct net *net, int dir) 506 { 507 static u32 idx_generator; 508 509 for (;;) { 510 struct hlist_node *entry; 511 struct hlist_head *list; 512 struct xfrm_policy *p; 513 u32 idx; 514 int found; 515 516 idx = (idx_generator | dir); 517 idx_generator += 8; 518 if (idx == 0) 519 idx = 8; 520 list = net->xfrm.policy_byidx + idx_hash(net, idx); 521 found = 0; 522 hlist_for_each_entry(p, entry, list, byidx) { 523 if (p->index == idx) { 524 found = 1; 525 break; 526 } 527 } 528 if (!found) 529 return idx; 530 } 531 } 532 533 static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2) 534 { 535 u32 *p1 = (u32 *) s1; 536 u32 *p2 = (u32 *) s2; 537 int len = sizeof(struct xfrm_selector) / sizeof(u32); 538 int i; 539 540 for (i = 0; i < len; i++) { 541 if (p1[i] != p2[i]) 542 return 1; 543 } 544 545 return 0; 546 } 547 548 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) 549 { 550 struct net *net = xp_net(policy); 551 struct xfrm_policy *pol; 552 struct xfrm_policy *delpol; 553 struct hlist_head *chain; 554 struct hlist_node *entry, *newpos; 555 u32 mark = policy->mark.v & policy->mark.m; 556 557 write_lock_bh(&xfrm_policy_lock); 558 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir); 559 delpol = NULL; 560 newpos = NULL; 561 hlist_for_each_entry(pol, entry, chain, bydst) { 562 if (pol->type == policy->type && 563 !selector_cmp(&pol->selector, &policy->selector) && 564 (mark & pol->mark.m) == pol->mark.v && 565 xfrm_sec_ctx_match(pol->security, policy->security) && 566 !WARN_ON(delpol)) { 567 if (excl) { 568 write_unlock_bh(&xfrm_policy_lock); 569 return -EEXIST; 570 } 571 delpol = pol; 572 if (policy->priority > pol->priority) 573 continue; 574 } else if (policy->priority >= pol->priority) { 575 newpos = &pol->bydst; 576 continue; 577 } 578 if (delpol) 579 break; 580 } 581 if (newpos) 582 hlist_add_after(newpos, &policy->bydst); 583 else 584 hlist_add_head(&policy->bydst, chain); 585 xfrm_pol_hold(policy); 586 net->xfrm.policy_count[dir]++; 587 atomic_inc(&flow_cache_genid); 588 if (delpol) 589 __xfrm_policy_unlink(delpol, dir); 590 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir); 591 hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index)); 592 policy->curlft.add_time = get_seconds(); 593 policy->curlft.use_time = 0; 594 if (!mod_timer(&policy->timer, jiffies + HZ)) 595 xfrm_pol_hold(policy); 596 list_add(&policy->walk.all, &net->xfrm.policy_all); 597 write_unlock_bh(&xfrm_policy_lock); 598 599 if (delpol) 600 xfrm_policy_kill(delpol); 601 else if (xfrm_bydst_should_resize(net, dir, NULL)) 602 schedule_work(&net->xfrm.policy_hash_work); 603 604 return 0; 605 } 606 EXPORT_SYMBOL(xfrm_policy_insert); 607 608 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type, 609 int dir, struct xfrm_selector *sel, 610 struct xfrm_sec_ctx *ctx, int delete, 611 int *err) 612 { 613 struct xfrm_policy *pol, *ret; 614 struct hlist_head *chain; 615 struct hlist_node *entry; 616 617 *err = 0; 618 write_lock_bh(&xfrm_policy_lock); 619 chain = policy_hash_bysel(net, sel, sel->family, dir); 620 ret = NULL; 621 hlist_for_each_entry(pol, entry, chain, bydst) { 622 if (pol->type == type && 623 (mark & pol->mark.m) == pol->mark.v && 624 !selector_cmp(sel, &pol->selector) && 625 xfrm_sec_ctx_match(ctx, pol->security)) { 626 xfrm_pol_hold(pol); 627 if (delete) { 628 *err = security_xfrm_policy_delete( 629 pol->security); 630 if (*err) { 631 write_unlock_bh(&xfrm_policy_lock); 632 return pol; 633 } 634 __xfrm_policy_unlink(pol, dir); 635 } 636 ret = pol; 637 break; 638 } 639 } 640 write_unlock_bh(&xfrm_policy_lock); 641 642 if (ret && delete) 643 xfrm_policy_kill(ret); 644 return ret; 645 } 646 EXPORT_SYMBOL(xfrm_policy_bysel_ctx); 647 648 struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type, 649 int dir, u32 id, int delete, int *err) 650 { 651 struct xfrm_policy *pol, *ret; 652 struct hlist_head *chain; 653 struct hlist_node *entry; 654 655 *err = -ENOENT; 656 if (xfrm_policy_id2dir(id) != dir) 657 return NULL; 658 659 *err = 0; 660 write_lock_bh(&xfrm_policy_lock); 661 chain = net->xfrm.policy_byidx + idx_hash(net, id); 662 ret = NULL; 663 hlist_for_each_entry(pol, entry, chain, byidx) { 664 if (pol->type == type && pol->index == id && 665 (mark & pol->mark.m) == pol->mark.v) { 666 xfrm_pol_hold(pol); 667 if (delete) { 668 *err = security_xfrm_policy_delete( 669 pol->security); 670 if (*err) { 671 write_unlock_bh(&xfrm_policy_lock); 672 return pol; 673 } 674 __xfrm_policy_unlink(pol, dir); 675 } 676 ret = pol; 677 break; 678 } 679 } 680 write_unlock_bh(&xfrm_policy_lock); 681 682 if (ret && delete) 683 xfrm_policy_kill(ret); 684 return ret; 685 } 686 EXPORT_SYMBOL(xfrm_policy_byid); 687 688 #ifdef CONFIG_SECURITY_NETWORK_XFRM 689 static inline int 690 xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info) 691 { 692 int dir, err = 0; 693 694 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { 695 struct xfrm_policy *pol; 696 struct hlist_node *entry; 697 int i; 698 699 hlist_for_each_entry(pol, entry, 700 &net->xfrm.policy_inexact[dir], bydst) { 701 if (pol->type != type) 702 continue; 703 err = security_xfrm_policy_delete(pol->security); 704 if (err) { 705 xfrm_audit_policy_delete(pol, 0, 706 audit_info->loginuid, 707 audit_info->sessionid, 708 audit_info->secid); 709 return err; 710 } 711 } 712 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) { 713 hlist_for_each_entry(pol, entry, 714 net->xfrm.policy_bydst[dir].table + i, 715 bydst) { 716 if (pol->type != type) 717 continue; 718 err = security_xfrm_policy_delete( 719 pol->security); 720 if (err) { 721 xfrm_audit_policy_delete(pol, 0, 722 audit_info->loginuid, 723 audit_info->sessionid, 724 audit_info->secid); 725 return err; 726 } 727 } 728 } 729 } 730 return err; 731 } 732 #else 733 static inline int 734 xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info) 735 { 736 return 0; 737 } 738 #endif 739 740 int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) 741 { 742 int dir, err = 0, cnt = 0; 743 744 write_lock_bh(&xfrm_policy_lock); 745 746 err = xfrm_policy_flush_secctx_check(net, type, audit_info); 747 if (err) 748 goto out; 749 750 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { 751 struct xfrm_policy *pol; 752 struct hlist_node *entry; 753 int i; 754 755 again1: 756 hlist_for_each_entry(pol, entry, 757 &net->xfrm.policy_inexact[dir], bydst) { 758 if (pol->type != type) 759 continue; 760 __xfrm_policy_unlink(pol, dir); 761 write_unlock_bh(&xfrm_policy_lock); 762 cnt++; 763 764 xfrm_audit_policy_delete(pol, 1, audit_info->loginuid, 765 audit_info->sessionid, 766 audit_info->secid); 767 768 xfrm_policy_kill(pol); 769 770 write_lock_bh(&xfrm_policy_lock); 771 goto again1; 772 } 773 774 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) { 775 again2: 776 hlist_for_each_entry(pol, entry, 777 net->xfrm.policy_bydst[dir].table + i, 778 bydst) { 779 if (pol->type != type) 780 continue; 781 __xfrm_policy_unlink(pol, dir); 782 write_unlock_bh(&xfrm_policy_lock); 783 cnt++; 784 785 xfrm_audit_policy_delete(pol, 1, 786 audit_info->loginuid, 787 audit_info->sessionid, 788 audit_info->secid); 789 xfrm_policy_kill(pol); 790 791 write_lock_bh(&xfrm_policy_lock); 792 goto again2; 793 } 794 } 795 796 } 797 if (!cnt) 798 err = -ESRCH; 799 out: 800 write_unlock_bh(&xfrm_policy_lock); 801 return err; 802 } 803 EXPORT_SYMBOL(xfrm_policy_flush); 804 805 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk, 806 int (*func)(struct xfrm_policy *, int, int, void*), 807 void *data) 808 { 809 struct xfrm_policy *pol; 810 struct xfrm_policy_walk_entry *x; 811 int error = 0; 812 813 if (walk->type >= XFRM_POLICY_TYPE_MAX && 814 walk->type != XFRM_POLICY_TYPE_ANY) 815 return -EINVAL; 816 817 if (list_empty(&walk->walk.all) && walk->seq != 0) 818 return 0; 819 820 write_lock_bh(&xfrm_policy_lock); 821 if (list_empty(&walk->walk.all)) 822 x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all); 823 else 824 x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all); 825 list_for_each_entry_from(x, &net->xfrm.policy_all, all) { 826 if (x->dead) 827 continue; 828 pol = container_of(x, struct xfrm_policy, walk); 829 if (walk->type != XFRM_POLICY_TYPE_ANY && 830 walk->type != pol->type) 831 continue; 832 error = func(pol, xfrm_policy_id2dir(pol->index), 833 walk->seq, data); 834 if (error) { 835 list_move_tail(&walk->walk.all, &x->all); 836 goto out; 837 } 838 walk->seq++; 839 } 840 if (walk->seq == 0) { 841 error = -ENOENT; 842 goto out; 843 } 844 list_del_init(&walk->walk.all); 845 out: 846 write_unlock_bh(&xfrm_policy_lock); 847 return error; 848 } 849 EXPORT_SYMBOL(xfrm_policy_walk); 850 851 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type) 852 { 853 INIT_LIST_HEAD(&walk->walk.all); 854 walk->walk.dead = 1; 855 walk->type = type; 856 walk->seq = 0; 857 } 858 EXPORT_SYMBOL(xfrm_policy_walk_init); 859 860 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk) 861 { 862 if (list_empty(&walk->walk.all)) 863 return; 864 865 write_lock_bh(&xfrm_policy_lock); 866 list_del(&walk->walk.all); 867 write_unlock_bh(&xfrm_policy_lock); 868 } 869 EXPORT_SYMBOL(xfrm_policy_walk_done); 870 871 /* 872 * Find policy to apply to this flow. 873 * 874 * Returns 0 if policy found, else an -errno. 875 */ 876 static int xfrm_policy_match(const struct xfrm_policy *pol, 877 const struct flowi *fl, 878 u8 type, u16 family, int dir) 879 { 880 const struct xfrm_selector *sel = &pol->selector; 881 int ret = -ESRCH; 882 bool match; 883 884 if (pol->family != family || 885 (fl->flowi_mark & pol->mark.m) != pol->mark.v || 886 pol->type != type) 887 return ret; 888 889 match = xfrm_selector_match(sel, fl, family); 890 if (match) 891 ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid, 892 dir); 893 894 return ret; 895 } 896 897 static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type, 898 const struct flowi *fl, 899 u16 family, u8 dir) 900 { 901 int err; 902 struct xfrm_policy *pol, *ret; 903 const xfrm_address_t *daddr, *saddr; 904 struct hlist_node *entry; 905 struct hlist_head *chain; 906 u32 priority = ~0U; 907 908 daddr = xfrm_flowi_daddr(fl, family); 909 saddr = xfrm_flowi_saddr(fl, family); 910 if (unlikely(!daddr || !saddr)) 911 return NULL; 912 913 read_lock_bh(&xfrm_policy_lock); 914 chain = policy_hash_direct(net, daddr, saddr, family, dir); 915 ret = NULL; 916 hlist_for_each_entry(pol, entry, chain, bydst) { 917 err = xfrm_policy_match(pol, fl, type, family, dir); 918 if (err) { 919 if (err == -ESRCH) 920 continue; 921 else { 922 ret = ERR_PTR(err); 923 goto fail; 924 } 925 } else { 926 ret = pol; 927 priority = ret->priority; 928 break; 929 } 930 } 931 chain = &net->xfrm.policy_inexact[dir]; 932 hlist_for_each_entry(pol, entry, chain, bydst) { 933 err = xfrm_policy_match(pol, fl, type, family, dir); 934 if (err) { 935 if (err == -ESRCH) 936 continue; 937 else { 938 ret = ERR_PTR(err); 939 goto fail; 940 } 941 } else if (pol->priority < priority) { 942 ret = pol; 943 break; 944 } 945 } 946 if (ret) 947 xfrm_pol_hold(ret); 948 fail: 949 read_unlock_bh(&xfrm_policy_lock); 950 951 return ret; 952 } 953 954 static struct xfrm_policy * 955 __xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir) 956 { 957 #ifdef CONFIG_XFRM_SUB_POLICY 958 struct xfrm_policy *pol; 959 960 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir); 961 if (pol != NULL) 962 return pol; 963 #endif 964 return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir); 965 } 966 967 static struct flow_cache_object * 968 xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, 969 u8 dir, struct flow_cache_object *old_obj, void *ctx) 970 { 971 struct xfrm_policy *pol; 972 973 if (old_obj) 974 xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo)); 975 976 pol = __xfrm_policy_lookup(net, fl, family, dir); 977 if (IS_ERR_OR_NULL(pol)) 978 return ERR_CAST(pol); 979 980 /* Resolver returns two references: 981 * one for cache and one for caller of flow_cache_lookup() */ 982 xfrm_pol_hold(pol); 983 984 return &pol->flo; 985 } 986 987 static inline int policy_to_flow_dir(int dir) 988 { 989 if (XFRM_POLICY_IN == FLOW_DIR_IN && 990 XFRM_POLICY_OUT == FLOW_DIR_OUT && 991 XFRM_POLICY_FWD == FLOW_DIR_FWD) 992 return dir; 993 switch (dir) { 994 default: 995 case XFRM_POLICY_IN: 996 return FLOW_DIR_IN; 997 case XFRM_POLICY_OUT: 998 return FLOW_DIR_OUT; 999 case XFRM_POLICY_FWD: 1000 return FLOW_DIR_FWD; 1001 } 1002 } 1003 1004 static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, 1005 const struct flowi *fl) 1006 { 1007 struct xfrm_policy *pol; 1008 1009 read_lock_bh(&xfrm_policy_lock); 1010 if ((pol = sk->sk_policy[dir]) != NULL) { 1011 bool match = xfrm_selector_match(&pol->selector, fl, 1012 sk->sk_family); 1013 int err = 0; 1014 1015 if (match) { 1016 if ((sk->sk_mark & pol->mark.m) != pol->mark.v) { 1017 pol = NULL; 1018 goto out; 1019 } 1020 err = security_xfrm_policy_lookup(pol->security, 1021 fl->flowi_secid, 1022 policy_to_flow_dir(dir)); 1023 if (!err) 1024 xfrm_pol_hold(pol); 1025 else if (err == -ESRCH) 1026 pol = NULL; 1027 else 1028 pol = ERR_PTR(err); 1029 } else 1030 pol = NULL; 1031 } 1032 out: 1033 read_unlock_bh(&xfrm_policy_lock); 1034 return pol; 1035 } 1036 1037 static void __xfrm_policy_link(struct xfrm_policy *pol, int dir) 1038 { 1039 struct net *net = xp_net(pol); 1040 struct hlist_head *chain = policy_hash_bysel(net, &pol->selector, 1041 pol->family, dir); 1042 1043 list_add(&pol->walk.all, &net->xfrm.policy_all); 1044 hlist_add_head(&pol->bydst, chain); 1045 hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index)); 1046 net->xfrm.policy_count[dir]++; 1047 xfrm_pol_hold(pol); 1048 1049 if (xfrm_bydst_should_resize(net, dir, NULL)) 1050 schedule_work(&net->xfrm.policy_hash_work); 1051 } 1052 1053 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, 1054 int dir) 1055 { 1056 struct net *net = xp_net(pol); 1057 1058 if (hlist_unhashed(&pol->bydst)) 1059 return NULL; 1060 1061 hlist_del(&pol->bydst); 1062 hlist_del(&pol->byidx); 1063 list_del(&pol->walk.all); 1064 net->xfrm.policy_count[dir]--; 1065 1066 return pol; 1067 } 1068 1069 int xfrm_policy_delete(struct xfrm_policy *pol, int dir) 1070 { 1071 write_lock_bh(&xfrm_policy_lock); 1072 pol = __xfrm_policy_unlink(pol, dir); 1073 write_unlock_bh(&xfrm_policy_lock); 1074 if (pol) { 1075 xfrm_policy_kill(pol); 1076 return 0; 1077 } 1078 return -ENOENT; 1079 } 1080 EXPORT_SYMBOL(xfrm_policy_delete); 1081 1082 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol) 1083 { 1084 struct net *net = xp_net(pol); 1085 struct xfrm_policy *old_pol; 1086 1087 #ifdef CONFIG_XFRM_SUB_POLICY 1088 if (pol && pol->type != XFRM_POLICY_TYPE_MAIN) 1089 return -EINVAL; 1090 #endif 1091 1092 write_lock_bh(&xfrm_policy_lock); 1093 old_pol = sk->sk_policy[dir]; 1094 sk->sk_policy[dir] = pol; 1095 if (pol) { 1096 pol->curlft.add_time = get_seconds(); 1097 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir); 1098 __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir); 1099 } 1100 if (old_pol) 1101 /* Unlinking succeeds always. This is the only function 1102 * allowed to delete or replace socket policy. 1103 */ 1104 __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir); 1105 write_unlock_bh(&xfrm_policy_lock); 1106 1107 if (old_pol) { 1108 xfrm_policy_kill(old_pol); 1109 } 1110 return 0; 1111 } 1112 1113 static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir) 1114 { 1115 struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC); 1116 1117 if (newp) { 1118 newp->selector = old->selector; 1119 if (security_xfrm_policy_clone(old->security, 1120 &newp->security)) { 1121 kfree(newp); 1122 return NULL; /* ENOMEM */ 1123 } 1124 newp->lft = old->lft; 1125 newp->curlft = old->curlft; 1126 newp->mark = old->mark; 1127 newp->action = old->action; 1128 newp->flags = old->flags; 1129 newp->xfrm_nr = old->xfrm_nr; 1130 newp->index = old->index; 1131 newp->type = old->type; 1132 memcpy(newp->xfrm_vec, old->xfrm_vec, 1133 newp->xfrm_nr*sizeof(struct xfrm_tmpl)); 1134 write_lock_bh(&xfrm_policy_lock); 1135 __xfrm_policy_link(newp, XFRM_POLICY_MAX+dir); 1136 write_unlock_bh(&xfrm_policy_lock); 1137 xfrm_pol_put(newp); 1138 } 1139 return newp; 1140 } 1141 1142 int __xfrm_sk_clone_policy(struct sock *sk) 1143 { 1144 struct xfrm_policy *p0 = sk->sk_policy[0], 1145 *p1 = sk->sk_policy[1]; 1146 1147 sk->sk_policy[0] = sk->sk_policy[1] = NULL; 1148 if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL) 1149 return -ENOMEM; 1150 if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL) 1151 return -ENOMEM; 1152 return 0; 1153 } 1154 1155 static int 1156 xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote, 1157 unsigned short family) 1158 { 1159 int err; 1160 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1161 1162 if (unlikely(afinfo == NULL)) 1163 return -EINVAL; 1164 err = afinfo->get_saddr(net, local, remote); 1165 xfrm_policy_put_afinfo(afinfo); 1166 return err; 1167 } 1168 1169 /* Resolve list of templates for the flow, given policy. */ 1170 1171 static int 1172 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl, 1173 struct xfrm_state **xfrm, unsigned short family) 1174 { 1175 struct net *net = xp_net(policy); 1176 int nx; 1177 int i, error; 1178 xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family); 1179 xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family); 1180 xfrm_address_t tmp; 1181 1182 for (nx=0, i = 0; i < policy->xfrm_nr; i++) { 1183 struct xfrm_state *x; 1184 xfrm_address_t *remote = daddr; 1185 xfrm_address_t *local = saddr; 1186 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i]; 1187 1188 if (tmpl->mode == XFRM_MODE_TUNNEL || 1189 tmpl->mode == XFRM_MODE_BEET) { 1190 remote = &tmpl->id.daddr; 1191 local = &tmpl->saddr; 1192 if (xfrm_addr_any(local, tmpl->encap_family)) { 1193 error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family); 1194 if (error) 1195 goto fail; 1196 local = &tmp; 1197 } 1198 } 1199 1200 x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family); 1201 1202 if (x && x->km.state == XFRM_STATE_VALID) { 1203 xfrm[nx++] = x; 1204 daddr = remote; 1205 saddr = local; 1206 continue; 1207 } 1208 if (x) { 1209 error = (x->km.state == XFRM_STATE_ERROR ? 1210 -EINVAL : -EAGAIN); 1211 xfrm_state_put(x); 1212 } 1213 else if (error == -ESRCH) 1214 error = -EAGAIN; 1215 1216 if (!tmpl->optional) 1217 goto fail; 1218 } 1219 return nx; 1220 1221 fail: 1222 for (nx--; nx>=0; nx--) 1223 xfrm_state_put(xfrm[nx]); 1224 return error; 1225 } 1226 1227 static int 1228 xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl, 1229 struct xfrm_state **xfrm, unsigned short family) 1230 { 1231 struct xfrm_state *tp[XFRM_MAX_DEPTH]; 1232 struct xfrm_state **tpp = (npols > 1) ? tp : xfrm; 1233 int cnx = 0; 1234 int error; 1235 int ret; 1236 int i; 1237 1238 for (i = 0; i < npols; i++) { 1239 if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) { 1240 error = -ENOBUFS; 1241 goto fail; 1242 } 1243 1244 ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family); 1245 if (ret < 0) { 1246 error = ret; 1247 goto fail; 1248 } else 1249 cnx += ret; 1250 } 1251 1252 /* found states are sorted for outbound processing */ 1253 if (npols > 1) 1254 xfrm_state_sort(xfrm, tpp, cnx, family); 1255 1256 return cnx; 1257 1258 fail: 1259 for (cnx--; cnx>=0; cnx--) 1260 xfrm_state_put(tpp[cnx]); 1261 return error; 1262 1263 } 1264 1265 /* Check that the bundle accepts the flow and its components are 1266 * still valid. 1267 */ 1268 1269 static inline int xfrm_get_tos(const struct flowi *fl, int family) 1270 { 1271 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1272 int tos; 1273 1274 if (!afinfo) 1275 return -EINVAL; 1276 1277 tos = afinfo->get_tos(fl); 1278 1279 xfrm_policy_put_afinfo(afinfo); 1280 1281 return tos; 1282 } 1283 1284 static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo) 1285 { 1286 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo); 1287 struct dst_entry *dst = &xdst->u.dst; 1288 1289 if (xdst->route == NULL) { 1290 /* Dummy bundle - if it has xfrms we were not 1291 * able to build bundle as template resolution failed. 1292 * It means we need to try again resolving. */ 1293 if (xdst->num_xfrms > 0) 1294 return NULL; 1295 } else { 1296 /* Real bundle */ 1297 if (stale_bundle(dst)) 1298 return NULL; 1299 } 1300 1301 dst_hold(dst); 1302 return flo; 1303 } 1304 1305 static int xfrm_bundle_flo_check(struct flow_cache_object *flo) 1306 { 1307 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo); 1308 struct dst_entry *dst = &xdst->u.dst; 1309 1310 if (!xdst->route) 1311 return 0; 1312 if (stale_bundle(dst)) 1313 return 0; 1314 1315 return 1; 1316 } 1317 1318 static void xfrm_bundle_flo_delete(struct flow_cache_object *flo) 1319 { 1320 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo); 1321 struct dst_entry *dst = &xdst->u.dst; 1322 1323 dst_free(dst); 1324 } 1325 1326 static const struct flow_cache_ops xfrm_bundle_fc_ops = { 1327 .get = xfrm_bundle_flo_get, 1328 .check = xfrm_bundle_flo_check, 1329 .delete = xfrm_bundle_flo_delete, 1330 }; 1331 1332 static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) 1333 { 1334 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1335 struct dst_ops *dst_ops; 1336 struct xfrm_dst *xdst; 1337 1338 if (!afinfo) 1339 return ERR_PTR(-EINVAL); 1340 1341 switch (family) { 1342 case AF_INET: 1343 dst_ops = &net->xfrm.xfrm4_dst_ops; 1344 break; 1345 #if IS_ENABLED(CONFIG_IPV6) 1346 case AF_INET6: 1347 dst_ops = &net->xfrm.xfrm6_dst_ops; 1348 break; 1349 #endif 1350 default: 1351 BUG(); 1352 } 1353 xdst = dst_alloc(dst_ops, NULL, 0, 0, 0); 1354 1355 if (likely(xdst)) { 1356 memset(&xdst->u.rt6.rt6i_table, 0, 1357 sizeof(*xdst) - sizeof(struct dst_entry)); 1358 xdst->flo.ops = &xfrm_bundle_fc_ops; 1359 } else 1360 xdst = ERR_PTR(-ENOBUFS); 1361 1362 xfrm_policy_put_afinfo(afinfo); 1363 1364 return xdst; 1365 } 1366 1367 static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst, 1368 int nfheader_len) 1369 { 1370 struct xfrm_policy_afinfo *afinfo = 1371 xfrm_policy_get_afinfo(dst->ops->family); 1372 int err; 1373 1374 if (!afinfo) 1375 return -EINVAL; 1376 1377 err = afinfo->init_path(path, dst, nfheader_len); 1378 1379 xfrm_policy_put_afinfo(afinfo); 1380 1381 return err; 1382 } 1383 1384 static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, 1385 const struct flowi *fl) 1386 { 1387 struct xfrm_policy_afinfo *afinfo = 1388 xfrm_policy_get_afinfo(xdst->u.dst.ops->family); 1389 int err; 1390 1391 if (!afinfo) 1392 return -EINVAL; 1393 1394 err = afinfo->fill_dst(xdst, dev, fl); 1395 1396 xfrm_policy_put_afinfo(afinfo); 1397 1398 return err; 1399 } 1400 1401 1402 /* Allocate chain of dst_entry's, attach known xfrm's, calculate 1403 * all the metrics... Shortly, bundle a bundle. 1404 */ 1405 1406 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, 1407 struct xfrm_state **xfrm, int nx, 1408 const struct flowi *fl, 1409 struct dst_entry *dst) 1410 { 1411 struct net *net = xp_net(policy); 1412 unsigned long now = jiffies; 1413 struct net_device *dev; 1414 struct xfrm_mode *inner_mode; 1415 struct dst_entry *dst_prev = NULL; 1416 struct dst_entry *dst0 = NULL; 1417 int i = 0; 1418 int err; 1419 int header_len = 0; 1420 int nfheader_len = 0; 1421 int trailer_len = 0; 1422 int tos; 1423 int family = policy->selector.family; 1424 xfrm_address_t saddr, daddr; 1425 1426 xfrm_flowi_addr_get(fl, &saddr, &daddr, family); 1427 1428 tos = xfrm_get_tos(fl, family); 1429 err = tos; 1430 if (tos < 0) 1431 goto put_states; 1432 1433 dst_hold(dst); 1434 1435 for (; i < nx; i++) { 1436 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family); 1437 struct dst_entry *dst1 = &xdst->u.dst; 1438 1439 err = PTR_ERR(xdst); 1440 if (IS_ERR(xdst)) { 1441 dst_release(dst); 1442 goto put_states; 1443 } 1444 1445 if (xfrm[i]->sel.family == AF_UNSPEC) { 1446 inner_mode = xfrm_ip2inner_mode(xfrm[i], 1447 xfrm_af2proto(family)); 1448 if (!inner_mode) { 1449 err = -EAFNOSUPPORT; 1450 dst_release(dst); 1451 goto put_states; 1452 } 1453 } else 1454 inner_mode = xfrm[i]->inner_mode; 1455 1456 if (!dst_prev) 1457 dst0 = dst1; 1458 else { 1459 dst_prev->child = dst_clone(dst1); 1460 dst1->flags |= DST_NOHASH; 1461 } 1462 1463 xdst->route = dst; 1464 dst_copy_metrics(dst1, dst); 1465 1466 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) { 1467 family = xfrm[i]->props.family; 1468 dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr, 1469 family); 1470 err = PTR_ERR(dst); 1471 if (IS_ERR(dst)) 1472 goto put_states; 1473 } else 1474 dst_hold(dst); 1475 1476 dst1->xfrm = xfrm[i]; 1477 xdst->xfrm_genid = xfrm[i]->genid; 1478 1479 dst1->obsolete = -1; 1480 dst1->flags |= DST_HOST; 1481 dst1->lastuse = now; 1482 1483 dst1->input = dst_discard; 1484 dst1->output = inner_mode->afinfo->output; 1485 1486 dst1->next = dst_prev; 1487 dst_prev = dst1; 1488 1489 header_len += xfrm[i]->props.header_len; 1490 if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT) 1491 nfheader_len += xfrm[i]->props.header_len; 1492 trailer_len += xfrm[i]->props.trailer_len; 1493 } 1494 1495 dst_prev->child = dst; 1496 dst0->path = dst; 1497 1498 err = -ENODEV; 1499 dev = dst->dev; 1500 if (!dev) 1501 goto free_dst; 1502 1503 /* Copy neighbour for reachability confirmation */ 1504 dst_set_neighbour(dst0, neigh_clone(dst_get_neighbour_noref(dst))); 1505 1506 xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len); 1507 xfrm_init_pmtu(dst_prev); 1508 1509 for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) { 1510 struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev; 1511 1512 err = xfrm_fill_dst(xdst, dev, fl); 1513 if (err) 1514 goto free_dst; 1515 1516 dst_prev->header_len = header_len; 1517 dst_prev->trailer_len = trailer_len; 1518 header_len -= xdst->u.dst.xfrm->props.header_len; 1519 trailer_len -= xdst->u.dst.xfrm->props.trailer_len; 1520 } 1521 1522 out: 1523 return dst0; 1524 1525 put_states: 1526 for (; i < nx; i++) 1527 xfrm_state_put(xfrm[i]); 1528 free_dst: 1529 if (dst0) 1530 dst_free(dst0); 1531 dst0 = ERR_PTR(err); 1532 goto out; 1533 } 1534 1535 static int inline 1536 xfrm_dst_alloc_copy(void **target, const void *src, int size) 1537 { 1538 if (!*target) { 1539 *target = kmalloc(size, GFP_ATOMIC); 1540 if (!*target) 1541 return -ENOMEM; 1542 } 1543 memcpy(*target, src, size); 1544 return 0; 1545 } 1546 1547 static int inline 1548 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel) 1549 { 1550 #ifdef CONFIG_XFRM_SUB_POLICY 1551 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 1552 return xfrm_dst_alloc_copy((void **)&(xdst->partner), 1553 sel, sizeof(*sel)); 1554 #else 1555 return 0; 1556 #endif 1557 } 1558 1559 static int inline 1560 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl) 1561 { 1562 #ifdef CONFIG_XFRM_SUB_POLICY 1563 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 1564 return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl)); 1565 #else 1566 return 0; 1567 #endif 1568 } 1569 1570 static int xfrm_expand_policies(const struct flowi *fl, u16 family, 1571 struct xfrm_policy **pols, 1572 int *num_pols, int *num_xfrms) 1573 { 1574 int i; 1575 1576 if (*num_pols == 0 || !pols[0]) { 1577 *num_pols = 0; 1578 *num_xfrms = 0; 1579 return 0; 1580 } 1581 if (IS_ERR(pols[0])) 1582 return PTR_ERR(pols[0]); 1583 1584 *num_xfrms = pols[0]->xfrm_nr; 1585 1586 #ifdef CONFIG_XFRM_SUB_POLICY 1587 if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW && 1588 pols[0]->type != XFRM_POLICY_TYPE_MAIN) { 1589 pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]), 1590 XFRM_POLICY_TYPE_MAIN, 1591 fl, family, 1592 XFRM_POLICY_OUT); 1593 if (pols[1]) { 1594 if (IS_ERR(pols[1])) { 1595 xfrm_pols_put(pols, *num_pols); 1596 return PTR_ERR(pols[1]); 1597 } 1598 (*num_pols) ++; 1599 (*num_xfrms) += pols[1]->xfrm_nr; 1600 } 1601 } 1602 #endif 1603 for (i = 0; i < *num_pols; i++) { 1604 if (pols[i]->action != XFRM_POLICY_ALLOW) { 1605 *num_xfrms = -1; 1606 break; 1607 } 1608 } 1609 1610 return 0; 1611 1612 } 1613 1614 static struct xfrm_dst * 1615 xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, 1616 const struct flowi *fl, u16 family, 1617 struct dst_entry *dst_orig) 1618 { 1619 struct net *net = xp_net(pols[0]); 1620 struct xfrm_state *xfrm[XFRM_MAX_DEPTH]; 1621 struct dst_entry *dst; 1622 struct xfrm_dst *xdst; 1623 int err; 1624 1625 /* Try to instantiate a bundle */ 1626 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family); 1627 if (err <= 0) { 1628 if (err != 0 && err != -EAGAIN) 1629 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); 1630 return ERR_PTR(err); 1631 } 1632 1633 dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig); 1634 if (IS_ERR(dst)) { 1635 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR); 1636 return ERR_CAST(dst); 1637 } 1638 1639 xdst = (struct xfrm_dst *)dst; 1640 xdst->num_xfrms = err; 1641 if (num_pols > 1) 1642 err = xfrm_dst_update_parent(dst, &pols[1]->selector); 1643 else 1644 err = xfrm_dst_update_origin(dst, fl); 1645 if (unlikely(err)) { 1646 dst_free(dst); 1647 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR); 1648 return ERR_PTR(err); 1649 } 1650 1651 xdst->num_pols = num_pols; 1652 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols); 1653 xdst->policy_genid = atomic_read(&pols[0]->genid); 1654 1655 return xdst; 1656 } 1657 1658 static struct flow_cache_object * 1659 xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, 1660 struct flow_cache_object *oldflo, void *ctx) 1661 { 1662 struct dst_entry *dst_orig = (struct dst_entry *)ctx; 1663 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 1664 struct xfrm_dst *xdst, *new_xdst; 1665 int num_pols = 0, num_xfrms = 0, i, err, pol_dead; 1666 1667 /* Check if the policies from old bundle are usable */ 1668 xdst = NULL; 1669 if (oldflo) { 1670 xdst = container_of(oldflo, struct xfrm_dst, flo); 1671 num_pols = xdst->num_pols; 1672 num_xfrms = xdst->num_xfrms; 1673 pol_dead = 0; 1674 for (i = 0; i < num_pols; i++) { 1675 pols[i] = xdst->pols[i]; 1676 pol_dead |= pols[i]->walk.dead; 1677 } 1678 if (pol_dead) { 1679 dst_free(&xdst->u.dst); 1680 xdst = NULL; 1681 num_pols = 0; 1682 num_xfrms = 0; 1683 oldflo = NULL; 1684 } 1685 } 1686 1687 /* Resolve policies to use if we couldn't get them from 1688 * previous cache entry */ 1689 if (xdst == NULL) { 1690 num_pols = 1; 1691 pols[0] = __xfrm_policy_lookup(net, fl, family, dir); 1692 err = xfrm_expand_policies(fl, family, pols, 1693 &num_pols, &num_xfrms); 1694 if (err < 0) 1695 goto inc_error; 1696 if (num_pols == 0) 1697 return NULL; 1698 if (num_xfrms <= 0) 1699 goto make_dummy_bundle; 1700 } 1701 1702 new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig); 1703 if (IS_ERR(new_xdst)) { 1704 err = PTR_ERR(new_xdst); 1705 if (err != -EAGAIN) 1706 goto error; 1707 if (oldflo == NULL) 1708 goto make_dummy_bundle; 1709 dst_hold(&xdst->u.dst); 1710 return oldflo; 1711 } else if (new_xdst == NULL) { 1712 num_xfrms = 0; 1713 if (oldflo == NULL) 1714 goto make_dummy_bundle; 1715 xdst->num_xfrms = 0; 1716 dst_hold(&xdst->u.dst); 1717 return oldflo; 1718 } 1719 1720 /* Kill the previous bundle */ 1721 if (xdst) { 1722 /* The policies were stolen for newly generated bundle */ 1723 xdst->num_pols = 0; 1724 dst_free(&xdst->u.dst); 1725 } 1726 1727 /* Flow cache does not have reference, it dst_free()'s, 1728 * but we do need to return one reference for original caller */ 1729 dst_hold(&new_xdst->u.dst); 1730 return &new_xdst->flo; 1731 1732 make_dummy_bundle: 1733 /* We found policies, but there's no bundles to instantiate: 1734 * either because the policy blocks, has no transformations or 1735 * we could not build template (no xfrm_states).*/ 1736 xdst = xfrm_alloc_dst(net, family); 1737 if (IS_ERR(xdst)) { 1738 xfrm_pols_put(pols, num_pols); 1739 return ERR_CAST(xdst); 1740 } 1741 xdst->num_pols = num_pols; 1742 xdst->num_xfrms = num_xfrms; 1743 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols); 1744 1745 dst_hold(&xdst->u.dst); 1746 return &xdst->flo; 1747 1748 inc_error: 1749 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); 1750 error: 1751 if (xdst != NULL) 1752 dst_free(&xdst->u.dst); 1753 else 1754 xfrm_pols_put(pols, num_pols); 1755 return ERR_PTR(err); 1756 } 1757 1758 static struct dst_entry *make_blackhole(struct net *net, u16 family, 1759 struct dst_entry *dst_orig) 1760 { 1761 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1762 struct dst_entry *ret; 1763 1764 if (!afinfo) { 1765 dst_release(dst_orig); 1766 ret = ERR_PTR(-EINVAL); 1767 } else { 1768 ret = afinfo->blackhole_route(net, dst_orig); 1769 } 1770 xfrm_policy_put_afinfo(afinfo); 1771 1772 return ret; 1773 } 1774 1775 /* Main function: finds/creates a bundle for given flow. 1776 * 1777 * At the moment we eat a raw IP route. Mostly to speed up lookups 1778 * on interfaces with disabled IPsec. 1779 */ 1780 struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, 1781 const struct flowi *fl, 1782 struct sock *sk, int flags) 1783 { 1784 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 1785 struct flow_cache_object *flo; 1786 struct xfrm_dst *xdst; 1787 struct dst_entry *dst, *route; 1788 u16 family = dst_orig->ops->family; 1789 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT); 1790 int i, err, num_pols, num_xfrms = 0, drop_pols = 0; 1791 1792 restart: 1793 dst = NULL; 1794 xdst = NULL; 1795 route = NULL; 1796 1797 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { 1798 num_pols = 1; 1799 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); 1800 err = xfrm_expand_policies(fl, family, pols, 1801 &num_pols, &num_xfrms); 1802 if (err < 0) 1803 goto dropdst; 1804 1805 if (num_pols) { 1806 if (num_xfrms <= 0) { 1807 drop_pols = num_pols; 1808 goto no_transform; 1809 } 1810 1811 xdst = xfrm_resolve_and_create_bundle( 1812 pols, num_pols, fl, 1813 family, dst_orig); 1814 if (IS_ERR(xdst)) { 1815 xfrm_pols_put(pols, num_pols); 1816 err = PTR_ERR(xdst); 1817 goto dropdst; 1818 } else if (xdst == NULL) { 1819 num_xfrms = 0; 1820 drop_pols = num_pols; 1821 goto no_transform; 1822 } 1823 1824 dst_hold(&xdst->u.dst); 1825 1826 spin_lock_bh(&xfrm_policy_sk_bundle_lock); 1827 xdst->u.dst.next = xfrm_policy_sk_bundles; 1828 xfrm_policy_sk_bundles = &xdst->u.dst; 1829 spin_unlock_bh(&xfrm_policy_sk_bundle_lock); 1830 1831 route = xdst->route; 1832 } 1833 } 1834 1835 if (xdst == NULL) { 1836 /* To accelerate a bit... */ 1837 if ((dst_orig->flags & DST_NOXFRM) || 1838 !net->xfrm.policy_count[XFRM_POLICY_OUT]) 1839 goto nopol; 1840 1841 flo = flow_cache_lookup(net, fl, family, dir, 1842 xfrm_bundle_lookup, dst_orig); 1843 if (flo == NULL) 1844 goto nopol; 1845 if (IS_ERR(flo)) { 1846 err = PTR_ERR(flo); 1847 goto dropdst; 1848 } 1849 xdst = container_of(flo, struct xfrm_dst, flo); 1850 1851 num_pols = xdst->num_pols; 1852 num_xfrms = xdst->num_xfrms; 1853 memcpy(pols, xdst->pols, sizeof(struct xfrm_policy*) * num_pols); 1854 route = xdst->route; 1855 } 1856 1857 dst = &xdst->u.dst; 1858 if (route == NULL && num_xfrms > 0) { 1859 /* The only case when xfrm_bundle_lookup() returns a 1860 * bundle with null route, is when the template could 1861 * not be resolved. It means policies are there, but 1862 * bundle could not be created, since we don't yet 1863 * have the xfrm_state's. We need to wait for KM to 1864 * negotiate new SA's or bail out with error.*/ 1865 if (net->xfrm.sysctl_larval_drop) { 1866 /* EREMOTE tells the caller to generate 1867 * a one-shot blackhole route. */ 1868 dst_release(dst); 1869 xfrm_pols_put(pols, drop_pols); 1870 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); 1871 1872 return make_blackhole(net, family, dst_orig); 1873 } 1874 if (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP) { 1875 DECLARE_WAITQUEUE(wait, current); 1876 1877 add_wait_queue(&net->xfrm.km_waitq, &wait); 1878 set_current_state(TASK_INTERRUPTIBLE); 1879 schedule(); 1880 set_current_state(TASK_RUNNING); 1881 remove_wait_queue(&net->xfrm.km_waitq, &wait); 1882 1883 if (!signal_pending(current)) { 1884 dst_release(dst); 1885 goto restart; 1886 } 1887 1888 err = -ERESTART; 1889 } else 1890 err = -EAGAIN; 1891 1892 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); 1893 goto error; 1894 } 1895 1896 no_transform: 1897 if (num_pols == 0) 1898 goto nopol; 1899 1900 if ((flags & XFRM_LOOKUP_ICMP) && 1901 !(pols[0]->flags & XFRM_POLICY_ICMP)) { 1902 err = -ENOENT; 1903 goto error; 1904 } 1905 1906 for (i = 0; i < num_pols; i++) 1907 pols[i]->curlft.use_time = get_seconds(); 1908 1909 if (num_xfrms < 0) { 1910 /* Prohibit the flow */ 1911 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK); 1912 err = -EPERM; 1913 goto error; 1914 } else if (num_xfrms > 0) { 1915 /* Flow transformed */ 1916 dst_release(dst_orig); 1917 } else { 1918 /* Flow passes untransformed */ 1919 dst_release(dst); 1920 dst = dst_orig; 1921 } 1922 ok: 1923 xfrm_pols_put(pols, drop_pols); 1924 if (dst && dst->xfrm && 1925 dst->xfrm->props.mode == XFRM_MODE_TUNNEL) 1926 dst->flags |= DST_XFRM_TUNNEL; 1927 return dst; 1928 1929 nopol: 1930 if (!(flags & XFRM_LOOKUP_ICMP)) { 1931 dst = dst_orig; 1932 goto ok; 1933 } 1934 err = -ENOENT; 1935 error: 1936 dst_release(dst); 1937 dropdst: 1938 dst_release(dst_orig); 1939 xfrm_pols_put(pols, drop_pols); 1940 return ERR_PTR(err); 1941 } 1942 EXPORT_SYMBOL(xfrm_lookup); 1943 1944 static inline int 1945 xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl) 1946 { 1947 struct xfrm_state *x; 1948 1949 if (!skb->sp || idx < 0 || idx >= skb->sp->len) 1950 return 0; 1951 x = skb->sp->xvec[idx]; 1952 if (!x->type->reject) 1953 return 0; 1954 return x->type->reject(x, skb, fl); 1955 } 1956 1957 /* When skb is transformed back to its "native" form, we have to 1958 * check policy restrictions. At the moment we make this in maximally 1959 * stupid way. Shame on me. :-) Of course, connected sockets must 1960 * have policy cached at them. 1961 */ 1962 1963 static inline int 1964 xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, 1965 unsigned short family) 1966 { 1967 if (xfrm_state_kern(x)) 1968 return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family); 1969 return x->id.proto == tmpl->id.proto && 1970 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) && 1971 (x->props.reqid == tmpl->reqid || !tmpl->reqid) && 1972 x->props.mode == tmpl->mode && 1973 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) || 1974 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) && 1975 !(x->props.mode != XFRM_MODE_TRANSPORT && 1976 xfrm_state_addr_cmp(tmpl, x, family)); 1977 } 1978 1979 /* 1980 * 0 or more than 0 is returned when validation is succeeded (either bypass 1981 * because of optional transport mode, or next index of the mathced secpath 1982 * state with the template. 1983 * -1 is returned when no matching template is found. 1984 * Otherwise "-2 - errored_index" is returned. 1985 */ 1986 static inline int 1987 xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start, 1988 unsigned short family) 1989 { 1990 int idx = start; 1991 1992 if (tmpl->optional) { 1993 if (tmpl->mode == XFRM_MODE_TRANSPORT) 1994 return start; 1995 } else 1996 start = -1; 1997 for (; idx < sp->len; idx++) { 1998 if (xfrm_state_ok(tmpl, sp->xvec[idx], family)) 1999 return ++idx; 2000 if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) { 2001 if (start == -1) 2002 start = -2-idx; 2003 break; 2004 } 2005 } 2006 return start; 2007 } 2008 2009 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, 2010 unsigned int family, int reverse) 2011 { 2012 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 2013 int err; 2014 2015 if (unlikely(afinfo == NULL)) 2016 return -EAFNOSUPPORT; 2017 2018 afinfo->decode_session(skb, fl, reverse); 2019 err = security_xfrm_decode_session(skb, &fl->flowi_secid); 2020 xfrm_policy_put_afinfo(afinfo); 2021 return err; 2022 } 2023 EXPORT_SYMBOL(__xfrm_decode_session); 2024 2025 static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp) 2026 { 2027 for (; k < sp->len; k++) { 2028 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) { 2029 *idxp = k; 2030 return 1; 2031 } 2032 } 2033 2034 return 0; 2035 } 2036 2037 int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, 2038 unsigned short family) 2039 { 2040 struct net *net = dev_net(skb->dev); 2041 struct xfrm_policy *pol; 2042 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 2043 int npols = 0; 2044 int xfrm_nr; 2045 int pi; 2046 int reverse; 2047 struct flowi fl; 2048 u8 fl_dir; 2049 int xerr_idx = -1; 2050 2051 reverse = dir & ~XFRM_POLICY_MASK; 2052 dir &= XFRM_POLICY_MASK; 2053 fl_dir = policy_to_flow_dir(dir); 2054 2055 if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) { 2056 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR); 2057 return 0; 2058 } 2059 2060 nf_nat_decode_session(skb, &fl, family); 2061 2062 /* First, check used SA against their selectors. */ 2063 if (skb->sp) { 2064 int i; 2065 2066 for (i=skb->sp->len-1; i>=0; i--) { 2067 struct xfrm_state *x = skb->sp->xvec[i]; 2068 if (!xfrm_selector_match(&x->sel, &fl, family)) { 2069 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH); 2070 return 0; 2071 } 2072 } 2073 } 2074 2075 pol = NULL; 2076 if (sk && sk->sk_policy[dir]) { 2077 pol = xfrm_sk_policy_lookup(sk, dir, &fl); 2078 if (IS_ERR(pol)) { 2079 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 2080 return 0; 2081 } 2082 } 2083 2084 if (!pol) { 2085 struct flow_cache_object *flo; 2086 2087 flo = flow_cache_lookup(net, &fl, family, fl_dir, 2088 xfrm_policy_lookup, NULL); 2089 if (IS_ERR_OR_NULL(flo)) 2090 pol = ERR_CAST(flo); 2091 else 2092 pol = container_of(flo, struct xfrm_policy, flo); 2093 } 2094 2095 if (IS_ERR(pol)) { 2096 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 2097 return 0; 2098 } 2099 2100 if (!pol) { 2101 if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) { 2102 xfrm_secpath_reject(xerr_idx, skb, &fl); 2103 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS); 2104 return 0; 2105 } 2106 return 1; 2107 } 2108 2109 pol->curlft.use_time = get_seconds(); 2110 2111 pols[0] = pol; 2112 npols ++; 2113 #ifdef CONFIG_XFRM_SUB_POLICY 2114 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) { 2115 pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, 2116 &fl, family, 2117 XFRM_POLICY_IN); 2118 if (pols[1]) { 2119 if (IS_ERR(pols[1])) { 2120 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 2121 return 0; 2122 } 2123 pols[1]->curlft.use_time = get_seconds(); 2124 npols ++; 2125 } 2126 } 2127 #endif 2128 2129 if (pol->action == XFRM_POLICY_ALLOW) { 2130 struct sec_path *sp; 2131 static struct sec_path dummy; 2132 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH]; 2133 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH]; 2134 struct xfrm_tmpl **tpp = tp; 2135 int ti = 0; 2136 int i, k; 2137 2138 if ((sp = skb->sp) == NULL) 2139 sp = &dummy; 2140 2141 for (pi = 0; pi < npols; pi++) { 2142 if (pols[pi] != pol && 2143 pols[pi]->action != XFRM_POLICY_ALLOW) { 2144 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK); 2145 goto reject; 2146 } 2147 if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) { 2148 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR); 2149 goto reject_error; 2150 } 2151 for (i = 0; i < pols[pi]->xfrm_nr; i++) 2152 tpp[ti++] = &pols[pi]->xfrm_vec[i]; 2153 } 2154 xfrm_nr = ti; 2155 if (npols > 1) { 2156 xfrm_tmpl_sort(stp, tpp, xfrm_nr, family); 2157 tpp = stp; 2158 } 2159 2160 /* For each tunnel xfrm, find the first matching tmpl. 2161 * For each tmpl before that, find corresponding xfrm. 2162 * Order is _important_. Later we will implement 2163 * some barriers, but at the moment barriers 2164 * are implied between each two transformations. 2165 */ 2166 for (i = xfrm_nr-1, k = 0; i >= 0; i--) { 2167 k = xfrm_policy_ok(tpp[i], sp, k, family); 2168 if (k < 0) { 2169 if (k < -1) 2170 /* "-2 - errored_index" returned */ 2171 xerr_idx = -(2+k); 2172 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH); 2173 goto reject; 2174 } 2175 } 2176 2177 if (secpath_has_nontransport(sp, k, &xerr_idx)) { 2178 XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH); 2179 goto reject; 2180 } 2181 2182 xfrm_pols_put(pols, npols); 2183 return 1; 2184 } 2185 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK); 2186 2187 reject: 2188 xfrm_secpath_reject(xerr_idx, skb, &fl); 2189 reject_error: 2190 xfrm_pols_put(pols, npols); 2191 return 0; 2192 } 2193 EXPORT_SYMBOL(__xfrm_policy_check); 2194 2195 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family) 2196 { 2197 struct net *net = dev_net(skb->dev); 2198 struct flowi fl; 2199 struct dst_entry *dst; 2200 int res = 1; 2201 2202 if (xfrm_decode_session(skb, &fl, family) < 0) { 2203 XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR); 2204 return 0; 2205 } 2206 2207 skb_dst_force(skb); 2208 2209 dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0); 2210 if (IS_ERR(dst)) { 2211 res = 0; 2212 dst = NULL; 2213 } 2214 skb_dst_set(skb, dst); 2215 return res; 2216 } 2217 EXPORT_SYMBOL(__xfrm_route_forward); 2218 2219 /* Optimize later using cookies and generation ids. */ 2220 2221 static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie) 2222 { 2223 /* Code (such as __xfrm4_bundle_create()) sets dst->obsolete 2224 * to "-1" to force all XFRM destinations to get validated by 2225 * dst_ops->check on every use. We do this because when a 2226 * normal route referenced by an XFRM dst is obsoleted we do 2227 * not go looking around for all parent referencing XFRM dsts 2228 * so that we can invalidate them. It is just too much work. 2229 * Instead we make the checks here on every use. For example: 2230 * 2231 * XFRM dst A --> IPv4 dst X 2232 * 2233 * X is the "xdst->route" of A (X is also the "dst->path" of A 2234 * in this example). If X is marked obsolete, "A" will not 2235 * notice. That's what we are validating here via the 2236 * stale_bundle() check. 2237 * 2238 * When a policy's bundle is pruned, we dst_free() the XFRM 2239 * dst which causes it's ->obsolete field to be set to a 2240 * positive non-zero integer. If an XFRM dst has been pruned 2241 * like this, we want to force a new route lookup. 2242 */ 2243 if (dst->obsolete < 0 && !stale_bundle(dst)) 2244 return dst; 2245 2246 return NULL; 2247 } 2248 2249 static int stale_bundle(struct dst_entry *dst) 2250 { 2251 return !xfrm_bundle_ok((struct xfrm_dst *)dst); 2252 } 2253 2254 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev) 2255 { 2256 while ((dst = dst->child) && dst->xfrm && dst->dev == dev) { 2257 dst->dev = dev_net(dev)->loopback_dev; 2258 dev_hold(dst->dev); 2259 dev_put(dev); 2260 } 2261 } 2262 EXPORT_SYMBOL(xfrm_dst_ifdown); 2263 2264 static void xfrm_link_failure(struct sk_buff *skb) 2265 { 2266 /* Impossible. Such dst must be popped before reaches point of failure. */ 2267 } 2268 2269 static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst) 2270 { 2271 if (dst) { 2272 if (dst->obsolete) { 2273 dst_release(dst); 2274 dst = NULL; 2275 } 2276 } 2277 return dst; 2278 } 2279 2280 static void __xfrm_garbage_collect(struct net *net) 2281 { 2282 struct dst_entry *head, *next; 2283 2284 spin_lock_bh(&xfrm_policy_sk_bundle_lock); 2285 head = xfrm_policy_sk_bundles; 2286 xfrm_policy_sk_bundles = NULL; 2287 spin_unlock_bh(&xfrm_policy_sk_bundle_lock); 2288 2289 while (head) { 2290 next = head->next; 2291 dst_free(head); 2292 head = next; 2293 } 2294 } 2295 2296 static void xfrm_garbage_collect(struct net *net) 2297 { 2298 flow_cache_flush(); 2299 __xfrm_garbage_collect(net); 2300 } 2301 2302 static void xfrm_garbage_collect_deferred(struct net *net) 2303 { 2304 flow_cache_flush_deferred(); 2305 __xfrm_garbage_collect(net); 2306 } 2307 2308 static void xfrm_init_pmtu(struct dst_entry *dst) 2309 { 2310 do { 2311 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 2312 u32 pmtu, route_mtu_cached; 2313 2314 pmtu = dst_mtu(dst->child); 2315 xdst->child_mtu_cached = pmtu; 2316 2317 pmtu = xfrm_state_mtu(dst->xfrm, pmtu); 2318 2319 route_mtu_cached = dst_mtu(xdst->route); 2320 xdst->route_mtu_cached = route_mtu_cached; 2321 2322 if (pmtu > route_mtu_cached) 2323 pmtu = route_mtu_cached; 2324 2325 dst_metric_set(dst, RTAX_MTU, pmtu); 2326 } while ((dst = dst->next)); 2327 } 2328 2329 /* Check that the bundle accepts the flow and its components are 2330 * still valid. 2331 */ 2332 2333 static int xfrm_bundle_ok(struct xfrm_dst *first) 2334 { 2335 struct dst_entry *dst = &first->u.dst; 2336 struct xfrm_dst *last; 2337 u32 mtu; 2338 2339 if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) || 2340 (dst->dev && !netif_running(dst->dev))) 2341 return 0; 2342 2343 last = NULL; 2344 2345 do { 2346 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 2347 2348 if (dst->xfrm->km.state != XFRM_STATE_VALID) 2349 return 0; 2350 if (xdst->xfrm_genid != dst->xfrm->genid) 2351 return 0; 2352 if (xdst->num_pols > 0 && 2353 xdst->policy_genid != atomic_read(&xdst->pols[0]->genid)) 2354 return 0; 2355 2356 mtu = dst_mtu(dst->child); 2357 if (xdst->child_mtu_cached != mtu) { 2358 last = xdst; 2359 xdst->child_mtu_cached = mtu; 2360 } 2361 2362 if (!dst_check(xdst->route, xdst->route_cookie)) 2363 return 0; 2364 mtu = dst_mtu(xdst->route); 2365 if (xdst->route_mtu_cached != mtu) { 2366 last = xdst; 2367 xdst->route_mtu_cached = mtu; 2368 } 2369 2370 dst = dst->child; 2371 } while (dst->xfrm); 2372 2373 if (likely(!last)) 2374 return 1; 2375 2376 mtu = last->child_mtu_cached; 2377 for (;;) { 2378 dst = &last->u.dst; 2379 2380 mtu = xfrm_state_mtu(dst->xfrm, mtu); 2381 if (mtu > last->route_mtu_cached) 2382 mtu = last->route_mtu_cached; 2383 dst_metric_set(dst, RTAX_MTU, mtu); 2384 2385 if (last == first) 2386 break; 2387 2388 last = (struct xfrm_dst *)last->u.dst.next; 2389 last->child_mtu_cached = mtu; 2390 } 2391 2392 return 1; 2393 } 2394 2395 static unsigned int xfrm_default_advmss(const struct dst_entry *dst) 2396 { 2397 return dst_metric_advmss(dst->path); 2398 } 2399 2400 static unsigned int xfrm_mtu(const struct dst_entry *dst) 2401 { 2402 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); 2403 2404 return mtu ? : dst_mtu(dst->path); 2405 } 2406 2407 static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, const void *daddr) 2408 { 2409 return dst_neigh_lookup(dst->path, daddr); 2410 } 2411 2412 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) 2413 { 2414 struct net *net; 2415 int err = 0; 2416 if (unlikely(afinfo == NULL)) 2417 return -EINVAL; 2418 if (unlikely(afinfo->family >= NPROTO)) 2419 return -EAFNOSUPPORT; 2420 write_lock_bh(&xfrm_policy_afinfo_lock); 2421 if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL)) 2422 err = -ENOBUFS; 2423 else { 2424 struct dst_ops *dst_ops = afinfo->dst_ops; 2425 if (likely(dst_ops->kmem_cachep == NULL)) 2426 dst_ops->kmem_cachep = xfrm_dst_cache; 2427 if (likely(dst_ops->check == NULL)) 2428 dst_ops->check = xfrm_dst_check; 2429 if (likely(dst_ops->default_advmss == NULL)) 2430 dst_ops->default_advmss = xfrm_default_advmss; 2431 if (likely(dst_ops->mtu == NULL)) 2432 dst_ops->mtu = xfrm_mtu; 2433 if (likely(dst_ops->negative_advice == NULL)) 2434 dst_ops->negative_advice = xfrm_negative_advice; 2435 if (likely(dst_ops->link_failure == NULL)) 2436 dst_ops->link_failure = xfrm_link_failure; 2437 if (likely(dst_ops->neigh_lookup == NULL)) 2438 dst_ops->neigh_lookup = xfrm_neigh_lookup; 2439 if (likely(afinfo->garbage_collect == NULL)) 2440 afinfo->garbage_collect = xfrm_garbage_collect_deferred; 2441 xfrm_policy_afinfo[afinfo->family] = afinfo; 2442 } 2443 write_unlock_bh(&xfrm_policy_afinfo_lock); 2444 2445 rtnl_lock(); 2446 for_each_net(net) { 2447 struct dst_ops *xfrm_dst_ops; 2448 2449 switch (afinfo->family) { 2450 case AF_INET: 2451 xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops; 2452 break; 2453 #if IS_ENABLED(CONFIG_IPV6) 2454 case AF_INET6: 2455 xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops; 2456 break; 2457 #endif 2458 default: 2459 BUG(); 2460 } 2461 *xfrm_dst_ops = *afinfo->dst_ops; 2462 } 2463 rtnl_unlock(); 2464 2465 return err; 2466 } 2467 EXPORT_SYMBOL(xfrm_policy_register_afinfo); 2468 2469 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo) 2470 { 2471 int err = 0; 2472 if (unlikely(afinfo == NULL)) 2473 return -EINVAL; 2474 if (unlikely(afinfo->family >= NPROTO)) 2475 return -EAFNOSUPPORT; 2476 write_lock_bh(&xfrm_policy_afinfo_lock); 2477 if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) { 2478 if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo)) 2479 err = -EINVAL; 2480 else { 2481 struct dst_ops *dst_ops = afinfo->dst_ops; 2482 xfrm_policy_afinfo[afinfo->family] = NULL; 2483 dst_ops->kmem_cachep = NULL; 2484 dst_ops->check = NULL; 2485 dst_ops->negative_advice = NULL; 2486 dst_ops->link_failure = NULL; 2487 afinfo->garbage_collect = NULL; 2488 } 2489 } 2490 write_unlock_bh(&xfrm_policy_afinfo_lock); 2491 return err; 2492 } 2493 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); 2494 2495 static void __net_init xfrm_dst_ops_init(struct net *net) 2496 { 2497 struct xfrm_policy_afinfo *afinfo; 2498 2499 read_lock_bh(&xfrm_policy_afinfo_lock); 2500 afinfo = xfrm_policy_afinfo[AF_INET]; 2501 if (afinfo) 2502 net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops; 2503 #if IS_ENABLED(CONFIG_IPV6) 2504 afinfo = xfrm_policy_afinfo[AF_INET6]; 2505 if (afinfo) 2506 net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops; 2507 #endif 2508 read_unlock_bh(&xfrm_policy_afinfo_lock); 2509 } 2510 2511 static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family) 2512 { 2513 struct xfrm_policy_afinfo *afinfo; 2514 if (unlikely(family >= NPROTO)) 2515 return NULL; 2516 read_lock(&xfrm_policy_afinfo_lock); 2517 afinfo = xfrm_policy_afinfo[family]; 2518 if (unlikely(!afinfo)) 2519 read_unlock(&xfrm_policy_afinfo_lock); 2520 return afinfo; 2521 } 2522 2523 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo) 2524 { 2525 read_unlock(&xfrm_policy_afinfo_lock); 2526 } 2527 2528 static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr) 2529 { 2530 struct net_device *dev = ptr; 2531 2532 switch (event) { 2533 case NETDEV_DOWN: 2534 xfrm_garbage_collect(dev_net(dev)); 2535 } 2536 return NOTIFY_DONE; 2537 } 2538 2539 static struct notifier_block xfrm_dev_notifier = { 2540 .notifier_call = xfrm_dev_event, 2541 }; 2542 2543 #ifdef CONFIG_XFRM_STATISTICS 2544 static int __net_init xfrm_statistics_init(struct net *net) 2545 { 2546 int rv; 2547 2548 if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics, 2549 sizeof(struct linux_xfrm_mib), 2550 __alignof__(struct linux_xfrm_mib)) < 0) 2551 return -ENOMEM; 2552 rv = xfrm_proc_init(net); 2553 if (rv < 0) 2554 snmp_mib_free((void __percpu **)net->mib.xfrm_statistics); 2555 return rv; 2556 } 2557 2558 static void xfrm_statistics_fini(struct net *net) 2559 { 2560 xfrm_proc_fini(net); 2561 snmp_mib_free((void __percpu **)net->mib.xfrm_statistics); 2562 } 2563 #else 2564 static int __net_init xfrm_statistics_init(struct net *net) 2565 { 2566 return 0; 2567 } 2568 2569 static void xfrm_statistics_fini(struct net *net) 2570 { 2571 } 2572 #endif 2573 2574 static int __net_init xfrm_policy_init(struct net *net) 2575 { 2576 unsigned int hmask, sz; 2577 int dir; 2578 2579 if (net_eq(net, &init_net)) 2580 xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache", 2581 sizeof(struct xfrm_dst), 2582 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, 2583 NULL); 2584 2585 hmask = 8 - 1; 2586 sz = (hmask+1) * sizeof(struct hlist_head); 2587 2588 net->xfrm.policy_byidx = xfrm_hash_alloc(sz); 2589 if (!net->xfrm.policy_byidx) 2590 goto out_byidx; 2591 net->xfrm.policy_idx_hmask = hmask; 2592 2593 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) { 2594 struct xfrm_policy_hash *htab; 2595 2596 net->xfrm.policy_count[dir] = 0; 2597 INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]); 2598 2599 htab = &net->xfrm.policy_bydst[dir]; 2600 htab->table = xfrm_hash_alloc(sz); 2601 if (!htab->table) 2602 goto out_bydst; 2603 htab->hmask = hmask; 2604 } 2605 2606 INIT_LIST_HEAD(&net->xfrm.policy_all); 2607 INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize); 2608 if (net_eq(net, &init_net)) 2609 register_netdevice_notifier(&xfrm_dev_notifier); 2610 return 0; 2611 2612 out_bydst: 2613 for (dir--; dir >= 0; dir--) { 2614 struct xfrm_policy_hash *htab; 2615 2616 htab = &net->xfrm.policy_bydst[dir]; 2617 xfrm_hash_free(htab->table, sz); 2618 } 2619 xfrm_hash_free(net->xfrm.policy_byidx, sz); 2620 out_byidx: 2621 return -ENOMEM; 2622 } 2623 2624 static void xfrm_policy_fini(struct net *net) 2625 { 2626 struct xfrm_audit audit_info; 2627 unsigned int sz; 2628 int dir; 2629 2630 flush_work(&net->xfrm.policy_hash_work); 2631 #ifdef CONFIG_XFRM_SUB_POLICY 2632 audit_info.loginuid = -1; 2633 audit_info.sessionid = -1; 2634 audit_info.secid = 0; 2635 xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info); 2636 #endif 2637 audit_info.loginuid = -1; 2638 audit_info.sessionid = -1; 2639 audit_info.secid = 0; 2640 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info); 2641 2642 WARN_ON(!list_empty(&net->xfrm.policy_all)); 2643 2644 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) { 2645 struct xfrm_policy_hash *htab; 2646 2647 WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir])); 2648 2649 htab = &net->xfrm.policy_bydst[dir]; 2650 sz = (htab->hmask + 1); 2651 WARN_ON(!hlist_empty(htab->table)); 2652 xfrm_hash_free(htab->table, sz); 2653 } 2654 2655 sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head); 2656 WARN_ON(!hlist_empty(net->xfrm.policy_byidx)); 2657 xfrm_hash_free(net->xfrm.policy_byidx, sz); 2658 } 2659 2660 static int __net_init xfrm_net_init(struct net *net) 2661 { 2662 int rv; 2663 2664 rv = xfrm_statistics_init(net); 2665 if (rv < 0) 2666 goto out_statistics; 2667 rv = xfrm_state_init(net); 2668 if (rv < 0) 2669 goto out_state; 2670 rv = xfrm_policy_init(net); 2671 if (rv < 0) 2672 goto out_policy; 2673 xfrm_dst_ops_init(net); 2674 rv = xfrm_sysctl_init(net); 2675 if (rv < 0) 2676 goto out_sysctl; 2677 return 0; 2678 2679 out_sysctl: 2680 xfrm_policy_fini(net); 2681 out_policy: 2682 xfrm_state_fini(net); 2683 out_state: 2684 xfrm_statistics_fini(net); 2685 out_statistics: 2686 return rv; 2687 } 2688 2689 static void __net_exit xfrm_net_exit(struct net *net) 2690 { 2691 xfrm_sysctl_fini(net); 2692 xfrm_policy_fini(net); 2693 xfrm_state_fini(net); 2694 xfrm_statistics_fini(net); 2695 } 2696 2697 static struct pernet_operations __net_initdata xfrm_net_ops = { 2698 .init = xfrm_net_init, 2699 .exit = xfrm_net_exit, 2700 }; 2701 2702 void __init xfrm_init(void) 2703 { 2704 register_pernet_subsys(&xfrm_net_ops); 2705 xfrm_input_init(); 2706 } 2707 2708 #ifdef CONFIG_AUDITSYSCALL 2709 static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp, 2710 struct audit_buffer *audit_buf) 2711 { 2712 struct xfrm_sec_ctx *ctx = xp->security; 2713 struct xfrm_selector *sel = &xp->selector; 2714 2715 if (ctx) 2716 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s", 2717 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str); 2718 2719 switch(sel->family) { 2720 case AF_INET: 2721 audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4); 2722 if (sel->prefixlen_s != 32) 2723 audit_log_format(audit_buf, " src_prefixlen=%d", 2724 sel->prefixlen_s); 2725 audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4); 2726 if (sel->prefixlen_d != 32) 2727 audit_log_format(audit_buf, " dst_prefixlen=%d", 2728 sel->prefixlen_d); 2729 break; 2730 case AF_INET6: 2731 audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6); 2732 if (sel->prefixlen_s != 128) 2733 audit_log_format(audit_buf, " src_prefixlen=%d", 2734 sel->prefixlen_s); 2735 audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6); 2736 if (sel->prefixlen_d != 128) 2737 audit_log_format(audit_buf, " dst_prefixlen=%d", 2738 sel->prefixlen_d); 2739 break; 2740 } 2741 } 2742 2743 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, 2744 uid_t auid, u32 sessionid, u32 secid) 2745 { 2746 struct audit_buffer *audit_buf; 2747 2748 audit_buf = xfrm_audit_start("SPD-add"); 2749 if (audit_buf == NULL) 2750 return; 2751 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf); 2752 audit_log_format(audit_buf, " res=%u", result); 2753 xfrm_audit_common_policyinfo(xp, audit_buf); 2754 audit_log_end(audit_buf); 2755 } 2756 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add); 2757 2758 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, 2759 uid_t auid, u32 sessionid, u32 secid) 2760 { 2761 struct audit_buffer *audit_buf; 2762 2763 audit_buf = xfrm_audit_start("SPD-delete"); 2764 if (audit_buf == NULL) 2765 return; 2766 xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf); 2767 audit_log_format(audit_buf, " res=%u", result); 2768 xfrm_audit_common_policyinfo(xp, audit_buf); 2769 audit_log_end(audit_buf); 2770 } 2771 EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete); 2772 #endif 2773 2774 #ifdef CONFIG_XFRM_MIGRATE 2775 static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp, 2776 const struct xfrm_selector *sel_tgt) 2777 { 2778 if (sel_cmp->proto == IPSEC_ULPROTO_ANY) { 2779 if (sel_tgt->family == sel_cmp->family && 2780 xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr, 2781 sel_cmp->family) == 0 && 2782 xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr, 2783 sel_cmp->family) == 0 && 2784 sel_tgt->prefixlen_d == sel_cmp->prefixlen_d && 2785 sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) { 2786 return true; 2787 } 2788 } else { 2789 if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) { 2790 return true; 2791 } 2792 } 2793 return false; 2794 } 2795 2796 static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector *sel, 2797 u8 dir, u8 type) 2798 { 2799 struct xfrm_policy *pol, *ret = NULL; 2800 struct hlist_node *entry; 2801 struct hlist_head *chain; 2802 u32 priority = ~0U; 2803 2804 read_lock_bh(&xfrm_policy_lock); 2805 chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir); 2806 hlist_for_each_entry(pol, entry, chain, bydst) { 2807 if (xfrm_migrate_selector_match(sel, &pol->selector) && 2808 pol->type == type) { 2809 ret = pol; 2810 priority = ret->priority; 2811 break; 2812 } 2813 } 2814 chain = &init_net.xfrm.policy_inexact[dir]; 2815 hlist_for_each_entry(pol, entry, chain, bydst) { 2816 if (xfrm_migrate_selector_match(sel, &pol->selector) && 2817 pol->type == type && 2818 pol->priority < priority) { 2819 ret = pol; 2820 break; 2821 } 2822 } 2823 2824 if (ret) 2825 xfrm_pol_hold(ret); 2826 2827 read_unlock_bh(&xfrm_policy_lock); 2828 2829 return ret; 2830 } 2831 2832 static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t) 2833 { 2834 int match = 0; 2835 2836 if (t->mode == m->mode && t->id.proto == m->proto && 2837 (m->reqid == 0 || t->reqid == m->reqid)) { 2838 switch (t->mode) { 2839 case XFRM_MODE_TUNNEL: 2840 case XFRM_MODE_BEET: 2841 if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr, 2842 m->old_family) == 0 && 2843 xfrm_addr_cmp(&t->saddr, &m->old_saddr, 2844 m->old_family) == 0) { 2845 match = 1; 2846 } 2847 break; 2848 case XFRM_MODE_TRANSPORT: 2849 /* in case of transport mode, template does not store 2850 any IP addresses, hence we just compare mode and 2851 protocol */ 2852 match = 1; 2853 break; 2854 default: 2855 break; 2856 } 2857 } 2858 return match; 2859 } 2860 2861 /* update endpoint address(es) of template(s) */ 2862 static int xfrm_policy_migrate(struct xfrm_policy *pol, 2863 struct xfrm_migrate *m, int num_migrate) 2864 { 2865 struct xfrm_migrate *mp; 2866 int i, j, n = 0; 2867 2868 write_lock_bh(&pol->lock); 2869 if (unlikely(pol->walk.dead)) { 2870 /* target policy has been deleted */ 2871 write_unlock_bh(&pol->lock); 2872 return -ENOENT; 2873 } 2874 2875 for (i = 0; i < pol->xfrm_nr; i++) { 2876 for (j = 0, mp = m; j < num_migrate; j++, mp++) { 2877 if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i])) 2878 continue; 2879 n++; 2880 if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL && 2881 pol->xfrm_vec[i].mode != XFRM_MODE_BEET) 2882 continue; 2883 /* update endpoints */ 2884 memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr, 2885 sizeof(pol->xfrm_vec[i].id.daddr)); 2886 memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr, 2887 sizeof(pol->xfrm_vec[i].saddr)); 2888 pol->xfrm_vec[i].encap_family = mp->new_family; 2889 /* flush bundles */ 2890 atomic_inc(&pol->genid); 2891 } 2892 } 2893 2894 write_unlock_bh(&pol->lock); 2895 2896 if (!n) 2897 return -ENODATA; 2898 2899 return 0; 2900 } 2901 2902 static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate) 2903 { 2904 int i, j; 2905 2906 if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH) 2907 return -EINVAL; 2908 2909 for (i = 0; i < num_migrate; i++) { 2910 if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr, 2911 m[i].old_family) == 0) && 2912 (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr, 2913 m[i].old_family) == 0)) 2914 return -EINVAL; 2915 if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) || 2916 xfrm_addr_any(&m[i].new_saddr, m[i].new_family)) 2917 return -EINVAL; 2918 2919 /* check if there is any duplicated entry */ 2920 for (j = i + 1; j < num_migrate; j++) { 2921 if (!memcmp(&m[i].old_daddr, &m[j].old_daddr, 2922 sizeof(m[i].old_daddr)) && 2923 !memcmp(&m[i].old_saddr, &m[j].old_saddr, 2924 sizeof(m[i].old_saddr)) && 2925 m[i].proto == m[j].proto && 2926 m[i].mode == m[j].mode && 2927 m[i].reqid == m[j].reqid && 2928 m[i].old_family == m[j].old_family) 2929 return -EINVAL; 2930 } 2931 } 2932 2933 return 0; 2934 } 2935 2936 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, 2937 struct xfrm_migrate *m, int num_migrate, 2938 struct xfrm_kmaddress *k) 2939 { 2940 int i, err, nx_cur = 0, nx_new = 0; 2941 struct xfrm_policy *pol = NULL; 2942 struct xfrm_state *x, *xc; 2943 struct xfrm_state *x_cur[XFRM_MAX_DEPTH]; 2944 struct xfrm_state *x_new[XFRM_MAX_DEPTH]; 2945 struct xfrm_migrate *mp; 2946 2947 if ((err = xfrm_migrate_check(m, num_migrate)) < 0) 2948 goto out; 2949 2950 /* Stage 1 - find policy */ 2951 if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) { 2952 err = -ENOENT; 2953 goto out; 2954 } 2955 2956 /* Stage 2 - find and update state(s) */ 2957 for (i = 0, mp = m; i < num_migrate; i++, mp++) { 2958 if ((x = xfrm_migrate_state_find(mp))) { 2959 x_cur[nx_cur] = x; 2960 nx_cur++; 2961 if ((xc = xfrm_state_migrate(x, mp))) { 2962 x_new[nx_new] = xc; 2963 nx_new++; 2964 } else { 2965 err = -ENODATA; 2966 goto restore_state; 2967 } 2968 } 2969 } 2970 2971 /* Stage 3 - update policy */ 2972 if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0) 2973 goto restore_state; 2974 2975 /* Stage 4 - delete old state(s) */ 2976 if (nx_cur) { 2977 xfrm_states_put(x_cur, nx_cur); 2978 xfrm_states_delete(x_cur, nx_cur); 2979 } 2980 2981 /* Stage 5 - announce */ 2982 km_migrate(sel, dir, type, m, num_migrate, k); 2983 2984 xfrm_pol_put(pol); 2985 2986 return 0; 2987 out: 2988 return err; 2989 2990 restore_state: 2991 if (pol) 2992 xfrm_pol_put(pol); 2993 if (nx_cur) 2994 xfrm_states_put(x_cur, nx_cur); 2995 if (nx_new) 2996 xfrm_states_delete(x_new, nx_new); 2997 2998 return err; 2999 } 3000 EXPORT_SYMBOL(xfrm_migrate); 3001 #endif 3002