/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <net/xfrm.h>
#include <net/ip.h>

DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_mutex);

static DEFINE_RWLOCK(xfrm_policy_lock);

struct xfrm_policy *xfrm_policy_list[XFRM_POLICY_MAX*2];
EXPORT_SYMBOL(xfrm_policy_list);

static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];

static kmem_cache_t *xfrm_dst_cache __read_mostly;

static struct work_struct xfrm_policy_gc_work;
static struct list_head xfrm_policy_gc_list =
	LIST_HEAD_INIT(xfrm_policy_gc_list);
static DEFINE_SPINLOCK(xfrm_policy_gc_lock);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
static struct xfrm_policy_afinfo *xfrm_policy_lock_afinfo(unsigned int family);
static void xfrm_policy_unlock_afinfo(struct xfrm_policy_afinfo *afinfo);

int xfrm_register_type(struct xfrm_type *type, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_lock_afinfo(family);
	struct xfrm_type **typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;

	if (likely(typemap[type->proto] == NULL))
		typemap[type->proto] = type;
	else
		err = -EEXIST;
	xfrm_policy_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_type);

int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_lock_afinfo(family);
	struct xfrm_type **typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;

	if (unlikely(typemap[type->proto] != type))
		err = -ENOENT;
	else
		typemap[type->proto] = NULL;
	xfrm_policy_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_type);

struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_type **typemap;
	struct xfrm_type *type;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;
	typemap = afinfo->type_map;

	type = typemap[proto];
	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;
	if (!type && !modload_attempted) {
		xfrm_policy_put_afinfo(afinfo);
		request_module("xfrm-type-%d-%d",
			       (int) family, (int) proto);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_policy_put_afinfo(afinfo);
	return type;
}

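/*
 * Route lookup via the address-family specific handler.  The afinfo
 * read reference taken below is held only for the duration of the
 * dst_lookup() call.
 */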
int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl,
		    unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	if (likely(afinfo->dst_lookup != NULL))
		err = afinfo->dst_lookup(dst, fl);
	else
		err = -EINVAL;
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_dst_lookup);

void xfrm_put_type(struct xfrm_type *type)
{
	module_put(type->owner);
}

int xfrm_register_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_policy_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -EEXIST;
	modemap = afinfo->mode_map;
	if (likely(modemap[mode->encap] == NULL)) {
		modemap[mode->encap] = mode;
		err = 0;
	}

	xfrm_policy_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_mode);

int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_policy_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -ENOENT;
	modemap = afinfo->mode_map;
	if (likely(modemap[mode->encap] == mode)) {
		modemap[mode->encap] = NULL;
		err = 0;
	}

	xfrm_policy_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_mode);

struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_mode *mode;
	int modload_attempted = 0;

	if (unlikely(encap >= XFRM_MODE_MAX))
		return NULL;

retry:
	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;

	mode = afinfo->mode_map[encap];
	if (unlikely(mode && !try_module_get(mode->owner)))
		mode = NULL;
	if (!mode && !modload_attempted) {
		xfrm_policy_put_afinfo(afinfo);
		request_module("xfrm-mode-%d-%d", family, encap);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_policy_put_afinfo(afinfo);
	return mode;
}

void xfrm_put_mode(struct xfrm_mode *mode)
{
	module_put(mode->owner);
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}

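/*
 * Per-policy lifetime timer.  A hard add/use expiry deletes the policy
 * and notifies key managers, while a soft expiry only warns via
 * km_policy_expired() and re-arms the timer XFRM_KM_TIMEOUT seconds
 * later.  An armed timer holds its own reference on the policy.
 */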
static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy*)data;
	unsigned long now = (unsigned long)xtime.tv_sec;
	long next = LONG_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (xp->dead)
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}


/* Allocate xfrm_policy. Not used here; it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		atomic_set(&policy->refcnt, 1);
		rwlock_init(&policy->lock);
		init_timer(&policy->timer);
		policy->timer.data = (unsigned long)policy;
		policy->timer.function = xfrm_policy_timer;
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

/* Destroy xfrm_policy: descendant resources must be released by this moment. */

void __xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->dead);

	BUG_ON(policy->bundles);

	if (del_timer(&policy->timer))
		BUG();

	security_xfrm_policy_free(policy);
	kfree(policy);
}
EXPORT_SYMBOL(__xfrm_policy_destroy);

static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
{
	struct dst_entry *dst;

	while ((dst = policy->bundles) != NULL) {
		policy->bundles = dst->next;
		dst_free(dst);
	}

	if (del_timer(&policy->timer))
		atomic_dec(&policy->refcnt);

	if (atomic_read(&policy->refcnt) > 1)
		flow_cache_flush();

	xfrm_pol_put(policy);
}

static void xfrm_policy_gc_task(void *data)
{
	struct xfrm_policy *policy;
	struct list_head *entry, *tmp;
	struct list_head gc_list = LIST_HEAD_INIT(gc_list);

	spin_lock_bh(&xfrm_policy_gc_lock);
	list_splice_init(&xfrm_policy_gc_list, &gc_list);
	spin_unlock_bh(&xfrm_policy_gc_lock);

	list_for_each_safe(entry, tmp, &gc_list) {
		policy = list_entry(entry, struct xfrm_policy, list);
		xfrm_policy_gc_kill(policy);
	}
}

/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must already be unlinked from the lists.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	int dead;

	write_lock_bh(&policy->lock);
	dead = policy->dead;
	policy->dead = 1;
	write_unlock_bh(&policy->lock);

	if (unlikely(dead)) {
		WARN_ON(1);
		return;
	}

	spin_lock(&xfrm_policy_gc_lock);
	list_add(&policy->list, &xfrm_policy_gc_list);
	spin_unlock(&xfrm_policy_gc_lock);

	schedule_work(&xfrm_policy_gc_work);
}

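/*
 * Policies are never torn down directly in the paths above:
 * xfrm_policy_kill() only marks the entry dead and queues it on
 * xfrm_policy_gc_list, and the actual teardown (bundle pruning, timer
 * and refcount drop) happens in xfrm_policy_gc_task() from the
 * workqueue.
 */
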
/* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute unpredictability of ordering of rules. This will not pass. */
static u32 xfrm_gen_index(int dir)
{
	u32 idx;
	struct xfrm_policy *p;
	static u32 idx_generator;

	for (;;) {
		idx = (idx_generator | dir);
		idx_generator += 8;
		if (idx == 0)
			idx = 8;
		for (p = xfrm_policy_list[dir]; p; p = p->next) {
			if (p->index == idx)
				break;
		}
		if (!p)
			return idx;
	}
}

int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct xfrm_policy *pol, **p;
	struct xfrm_policy *delpol = NULL;
	struct xfrm_policy **newpos = NULL;
	struct dst_entry *gc_list;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL;) {
		if (!delpol && memcmp(&policy->selector, &pol->selector, sizeof(pol->selector)) == 0 &&
		    xfrm_sec_ctx_match(pol->security, policy->security)) {
			if (excl) {
				write_unlock_bh(&xfrm_policy_lock);
				return -EEXIST;
			}
			*p = pol->next;
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			p = &pol->next;
			continue;
		}
		if (!newpos)
			newpos = p;
		if (delpol)
			break;
		p = &pol->next;
	}
	if (newpos)
		p = newpos;
	xfrm_pol_hold(policy);
	policy->next = *p;
	*p = policy;
	atomic_inc(&flow_cache_genid);
	policy->index = delpol ? delpol->index : xfrm_gen_index(dir);
	policy->curlft.add_time = (unsigned long)xtime.tv_sec;
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	write_unlock_bh(&xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);

	read_lock_bh(&xfrm_policy_lock);
	gc_list = NULL;
	for (policy = policy->next; policy; policy = policy->next) {
		struct dst_entry *dst;

		write_lock(&policy->lock);
		dst = policy->bundles;
		if (dst) {
			struct dst_entry *tail = dst;
			while (tail->next)
				tail = tail->next;
			tail->next = gc_list;
			gc_list = dst;

			policy->bundles = NULL;
		}
		write_unlock(&policy->lock);
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		struct dst_entry *dst = gc_list;

		gc_list = dst->next;
		dst_free(dst);
	}

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

struct xfrm_policy *xfrm_policy_bysel_ctx(int dir, struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete)
{
	struct xfrm_policy *pol, **p;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL; p = &pol->next) {
		if ((memcmp(sel, &pol->selector, sizeof(*sel)) == 0) &&
		    (xfrm_sec_ctx_match(ctx, pol->security))) {
			xfrm_pol_hold(pol);
			if (delete)
				*p = pol->next;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (pol && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
	}
	return pol;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(int dir, u32 id, int delete)
{
	struct xfrm_policy *pol, **p;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL; p = &pol->next) {
		if (pol->index == id) {
			xfrm_pol_hold(pol);
			if (delete)
				*p = pol->next;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (pol && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
	}
	return pol;
}
EXPORT_SYMBOL(xfrm_policy_byid);

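/*
 * Remove every policy from the main SPD.  The policy list write lock is
 * dropped and re-acquired around each xfrm_policy_kill() call, so other
 * writers may run between individual removals.
 */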
void xfrm_policy_flush(void)
{
	struct xfrm_policy *xp;
	int dir;

	write_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		while ((xp = xfrm_policy_list[dir]) != NULL) {
			xfrm_policy_list[dir] = xp->next;
			write_unlock_bh(&xfrm_policy_lock);

			xfrm_policy_kill(xp);

			write_lock_bh(&xfrm_policy_lock);
		}
	}
	atomic_inc(&flow_cache_genid);
	write_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_flush);

int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *xp;
	int dir;
	int count = 0;
	int error = 0;

	read_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
		for (xp = xfrm_policy_list[dir]; xp; xp = xp->next)
			count++;
	}

	if (count == 0) {
		error = -ENOENT;
		goto out;
	}

	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
		for (xp = xfrm_policy_list[dir]; xp; xp = xp->next) {
			error = func(xp, dir%XFRM_POLICY_MAX, --count, data);
			if (error)
				goto out;
		}
	}

out:
	read_unlock_bh(&xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

/* Find policy to apply to this flow. */

static void xfrm_policy_lookup(struct flowi *fl, u32 sk_sid, u16 family, u8 dir,
			       void **objp, atomic_t **obj_refp)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	for (pol = xfrm_policy_list[dir]; pol; pol = pol->next) {
		struct xfrm_selector *sel = &pol->selector;
		int match;

		if (pol->family != family)
			continue;

		match = xfrm_selector_match(sel, fl, family);

		if (match) {
			if (!security_xfrm_policy_lookup(pol, sk_sid, dir)) {
				xfrm_pol_hold(pol);
				break;
			}
		}
	}
	read_unlock_bh(&xfrm_policy_lock);
	if ((*objp = (void *) pol) != NULL)
		*obj_refp = &pol->refcnt;
}

static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;
	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	};
}

static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl, u32 sk_sid)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		int match = xfrm_selector_match(&pol->selector, fl,
						sk->sk_family);
		int err = 0;

		if (match)
			err = security_xfrm_policy_lookup(pol, sk_sid, policy_to_flow_dir(dir));

		if (match && !err)
			xfrm_pol_hold(pol);
		else
			pol = NULL;
	}
	read_unlock_bh(&xfrm_policy_lock);
	return pol;
}

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	pol->next = xfrm_policy_list[dir];
	xfrm_policy_list[dir] = pol;
	xfrm_pol_hold(pol);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	struct xfrm_policy **polp;

	for (polp = &xfrm_policy_list[dir];
	     *polp != NULL; polp = &(*polp)->next) {
		if (*polp == pol) {
			*polp = pol->next;
			return pol;
		}
	}
	return NULL;
}

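/*
 * Socket policies share the global policy lists but are linked under
 * index XFRM_POLICY_MAX + dir, which is why xfrm_policy_list[] is
 * dimensioned XFRM_POLICY_MAX*2 and why xfrm_policy_delete() below only
 * bumps the flow cache generation id for dir < XFRM_POLICY_MAX.
 */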
int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	write_lock_bh(&xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);
	if (pol) {
		if (dir < XFRM_POLICY_MAX)
			atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);

int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct xfrm_policy *old_pol;

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
	if (pol) {
		pol->curlft.add_time = (unsigned long)xtime.tv_sec;
		pol->index = xfrm_gen_index(XFRM_POLICY_MAX+dir);
		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
	}
	if (old_pol)
		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
	write_unlock_bh(&xfrm_policy_lock);

	if (old_pol) {
		xfrm_policy_kill(old_pol);
	}
	return 0;
}

static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(GFP_ATOMIC);

	if (newp) {
		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old, newp)) {
			kfree(newp);
			return NULL;  /* ENOMEM */
		}
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);
		xfrm_pol_put(newp);
	}
	return newp;
}

int __xfrm_sk_clone_policy(struct sock *sk)
{
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
		return -ENOMEM;
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
		return -ENOMEM;
	return 0;
}

/* Resolve list of templates for the flow, given policy. */

static int
xfrm_tmpl_resolve(struct xfrm_policy *policy, struct flowi *fl,
		  struct xfrm_state **xfrm,
		  unsigned short family)
{
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);

	for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		}

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx>=0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}

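/*
 * xfrm_tmpl_resolve() returns the number of states placed in xfrm[], each
 * with a reference held.  On failure it drops any references already taken
 * and returns a negative errno; -EAGAIN typically means a matching SA is
 * still being negotiated by a key manager.
 */
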
/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static struct dst_entry *
xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
{
	struct dst_entry *x;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EINVAL);
	x = afinfo->find_bundle(fl, policy);
	xfrm_policy_put_afinfo(afinfo);
	return x;
}

/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... In short, bundle a bundle.
 */

static int
xfrm_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int nx,
		   struct flowi *fl, struct dst_entry **dst_p,
		   unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->bundle_create(policy, xfrm, nx, fl, dst_p);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}


static int stale_bundle(struct dst_entry *dst);

/* Main function: finds/creates a bundle for given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
		struct sock *sk, int flags)
{
	struct xfrm_policy *policy;
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst, *dst_orig = *dst_p;
	int nx = 0;
	int err;
	u32 genid;
	u16 family;
	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
	u32 sk_sid = security_sk_sid(sk, fl, dir);
restart:
	genid = atomic_read(&flow_cache_genid);
	policy = NULL;
	if (sk && sk->sk_policy[1])
		policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, sk_sid);

	if (!policy) {
		/* To accelerate a bit... */
		if ((dst_orig->flags & DST_NOXFRM) || !xfrm_policy_list[XFRM_POLICY_OUT])
			return 0;

		policy = flow_cache_lookup(fl, sk_sid, dst_orig->ops->family,
					   dir, xfrm_policy_lookup);
	}

	if (!policy)
		return 0;

	family = dst_orig->ops->family;
	policy->curlft.use_time = (unsigned long)xtime.tv_sec;

	switch (policy->action) {
	case XFRM_POLICY_BLOCK:
		/* Prohibit the flow */
		err = -EPERM;
		goto error;

	case XFRM_POLICY_ALLOW:
		if (policy->xfrm_nr == 0) {
			/* Flow passes not transformed. */
			xfrm_pol_put(policy);
			return 0;
		}

		/* Try to find matching bundle.
		 *
		 * LATER: help from flow cache. It is optional, this
		 * is required only for output policy.
		 */
		dst = xfrm_find_bundle(fl, policy, family);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			goto error;
		}

		if (dst)
			break;

		nx = xfrm_tmpl_resolve(policy, fl, xfrm, family);

		if (unlikely(nx<0)) {
			err = nx;
			if (err == -EAGAIN && flags) {
				DECLARE_WAITQUEUE(wait, current);

				add_wait_queue(&km_waitq, &wait);
				set_current_state(TASK_INTERRUPTIBLE);
				schedule();
				set_current_state(TASK_RUNNING);
				remove_wait_queue(&km_waitq, &wait);

				nx = xfrm_tmpl_resolve(policy, fl, xfrm, family);

				if (nx == -EAGAIN && signal_pending(current)) {
					err = -ERESTART;
					goto error;
				}
				if (nx == -EAGAIN ||
				    genid != atomic_read(&flow_cache_genid)) {
					xfrm_pol_put(policy);
					goto restart;
				}
				err = nx;
			}
			if (err < 0)
				goto error;
		}
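		/*
		 * nx == 0 here means every template in the policy was
		 * optional and none of them resolved, so the flow is
		 * allowed to pass in the clear.
		 */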
		if (nx == 0) {
			/* Flow passes not transformed. */
			xfrm_pol_put(policy);
			return 0;
		}

		dst = dst_orig;
		err = xfrm_bundle_create(policy, xfrm, nx, fl, &dst, family);

		if (unlikely(err)) {
			int i;
			for (i=0; i<nx; i++)
				xfrm_state_put(xfrm[i]);
			goto error;
		}

		write_lock_bh(&policy->lock);
		if (unlikely(policy->dead || stale_bundle(dst))) {
			/* Wow! While we worked on resolving, this
			 * policy has gone. Retry. It is not paranoia,
			 * we just cannot enlist new bundle to dead object.
			 * We can't enlist stale bundles either.
			 */
			write_unlock_bh(&policy->lock);
			if (dst)
				dst_free(dst);

			err = -EHOSTUNREACH;
			goto error;
		}
		dst->next = policy->bundles;
		policy->bundles = dst;
		dst_hold(dst);
		write_unlock_bh(&policy->lock);
	}
	*dst_p = dst;
	dst_release(dst_orig);
	xfrm_pol_put(policy);
	return 0;

error:
	dst_release(dst_orig);
	xfrm_pol_put(policy);
	*dst_p = NULL;
	return err;
}
EXPORT_SYMBOL(xfrm_lookup);

/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we do this in a maximally
 * stupid way. Shame on me. :-)  Of course, connected sockets must
 * have policy cached at them.
 */

static inline int
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		(tmpl->aalgos & (1<<x->props.aalgo)) &&
		!(x->props.mode && xfrm_state_addr_cmp(tmpl, x, family));
}

static inline int
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (!tmpl->mode)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;
		if (sp->xvec[idx]->props.mode)
			break;
	}
	return start;
}

int
xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl);
	xfrm_policy_put_afinfo(afinfo);
	return 0;
}
EXPORT_SYMBOL(xfrm_decode_session);

static inline int secpath_has_tunnel(struct sec_path *sp, int k)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode)
			return 1;
	}

	return 0;
}

int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct xfrm_policy *pol;
	struct flowi fl;
	u8 fl_dir = policy_to_flow_dir(dir);
	u32 sk_sid;

	if (xfrm_decode_session(skb, &fl, family) < 0)
		return 0;
	nf_nat_decode_session(skb, &fl, family);

	sk_sid = security_sk_sid(sk, &fl, fl_dir);

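	/*
	 * Inbound policy enforcement is two-staged: first each SA that was
	 * actually applied to the packet is checked against its own
	 * selector below, then the matching policy's templates are checked
	 * against the secpath via xfrm_policy_ok().
	 */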
	/* First, check the SAs used against their selectors. */
	if (skb->sp) {
		int i;

		for (i=skb->sp->len-1; i>=0; i--) {
			struct xfrm_state *x = skb->sp->xvec[i];
			if (!xfrm_selector_match(&x->sel, &fl, family))
				return 0;
		}
	}

	pol = NULL;
	if (sk && sk->sk_policy[dir])
		pol = xfrm_sk_policy_lookup(sk, dir, &fl, sk_sid);

	if (!pol)
		pol = flow_cache_lookup(&fl, sk_sid, family, fl_dir,
					xfrm_policy_lookup);

	if (!pol)
		return !skb->sp || !secpath_has_tunnel(skb->sp, 0);

	pol->curlft.use_time = (unsigned long)xtime.tv_sec;

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		for (i = pol->xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(pol->xfrm_vec+i, sp, k, family);
			if (k < 0)
				goto reject;
		}

		if (secpath_has_tunnel(sp, k))
			goto reject;

		xfrm_pol_put(pol);
		return 1;
	}

reject:
	xfrm_pol_put(pol);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);

int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct flowi fl;

	if (xfrm_decode_session(skb, &fl, family) < 0)
		return 0;

	return xfrm_lookup(&skb->dst, &fl, NULL, 0) == 0;
}
EXPORT_SYMBOL(__xfrm_route_forward);

/* Optimize later using cookies and generation ids. */

static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to "-1" to force all XFRM destinations to get validated by
	 * dst_ops->check on every use.  We do this because when a
	 * normal route referenced by an XFRM dst is obsoleted we do
	 * not go looking around for all parent referencing XFRM dsts
	 * so that we can invalidate them.  It is just too much work.
	 * Instead we make the checks here on every use.  For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example).  If X is marked obsolete, "A" will not
	 * notice.  That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a policy's bundle is pruned, we dst_free() the XFRM
	 * dst which causes its ->obsolete field to be set to a
	 * positive non-zero integer.  If an XFRM dst has been pruned
	 * like this, we want to force a new route lookup.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok((struct xfrm_dst *)dst, NULL, AF_UNSPEC);
}

void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = &loopback_dev;
		dev_hold(&loopback_dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

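/*
 * Default dst_ops callbacks handed out by xfrm_policy_register_afinfo():
 * bundles are expected never to see a link failure directly, and negative
 * advice simply drops an obsolete bundle so the next lookup rebuilds it.
 */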
static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such dst must be popped before it reaches the point of failure. */
	return;
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

static void xfrm_prune_bundles(int (*func)(struct dst_entry *))
{
	int i;
	struct xfrm_policy *pol;
	struct dst_entry *dst, **dstp, *gc_list = NULL;

	read_lock_bh(&xfrm_policy_lock);
	for (i=0; i<2*XFRM_POLICY_MAX; i++) {
		for (pol = xfrm_policy_list[i]; pol; pol = pol->next) {
			write_lock(&pol->lock);
			dstp = &pol->bundles;
			while ((dst=*dstp) != NULL) {
				if (func(dst)) {
					*dstp = dst->next;
					dst->next = gc_list;
					gc_list = dst;
				} else {
					dstp = &dst->next;
				}
			}
			write_unlock(&pol->lock);
		}
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		dst = gc_list;
		gc_list = dst->next;
		dst_free(dst);
	}
}

static int unused_bundle(struct dst_entry *dst)
{
	return !atomic_read(&dst->__refcnt);
}

static void __xfrm_garbage_collect(void)
{
	xfrm_prune_bundles(unused_bundle);
}

int xfrm_flush_bundles(void)
{
	xfrm_prune_bundles(stale_bundle);
	return 0;
}

static int always_true(struct dst_entry *dst)
{
	return 1;
}

void xfrm_flush_all_bundles(void)
{
	xfrm_prune_bundles(always_true);
}

void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst->metrics[RTAX_MTU-1] = pmtu;
	} while ((dst = dst->next));
}

EXPORT_SYMBOL(xfrm_init_pmtu);

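/*
 * Bundle MTUs are maintained in two places: xfrm_init_pmtu() above seeds
 * the cached child and route MTUs when a bundle is created, while
 * xfrm_bundle_ok() below revalidates the cached values (and the underlying
 * routes) every time the bundle is used.
 */
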
/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family)
{
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;
	u32 mtu;

	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;

	last = NULL;

	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
			return 0;
		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			last = xdst;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			last = xdst;
			xdst->route_mtu_cached = mtu;
		}

		dst = dst->child;
	} while (dst->xfrm);

	if (likely(!last))
		return 1;

	mtu = last->child_mtu_cached;
	for (;;) {
		dst = &last->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst->metrics[RTAX_MTU-1] = mtu;

		if (last == first)
			break;

		last = last->u.next;
		last->child_mtu_cached = mtu;
	}

	return 1;
}

EXPORT_SYMBOL(xfrm_bundle_ok);

int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = __xfrm_garbage_collect;
		xfrm_policy_afinfo[afinfo->family] = afinfo;
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);

int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else {
			struct dst_ops *dst_ops = afinfo->dst_ops;
			xfrm_policy_afinfo[afinfo->family] = NULL;
			dst_ops->kmem_cachep = NULL;
			dst_ops->check = NULL;
			dst_ops->negative_advice = NULL;
			dst_ops->link_failure = NULL;
			afinfo->garbage_collect = NULL;
		}
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

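/*
 * xfrm_policy_get_afinfo()/xfrm_policy_put_afinfo() bracket read-side use
 * of the afinfo table: the read lock taken in the get is held while the
 * caller uses the entry and is only released in the put, or immediately
 * in the get itself when no handler is registered for the family.
 */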
static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_policy_afinfo_lock);
	return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	read_unlock(&xfrm_policy_afinfo_lock);
}

static struct xfrm_policy_afinfo *xfrm_policy_lock_afinfo(unsigned int family)
{
	struct xfrm_policy_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (unlikely(!afinfo))
		write_unlock_bh(&xfrm_policy_afinfo_lock);
	return afinfo;
}

static void xfrm_policy_unlock_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	write_unlock_bh(&xfrm_policy_afinfo_lock);
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_DOWN:
		xfrm_flush_bundles();
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	xfrm_dev_event,
	NULL,
	0
};

static void __init xfrm_policy_init(void)
{
	xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN,
					   NULL, NULL);
	if (!xfrm_dst_cache)
		panic("XFRM: failed to allocate xfrm_dst_cache\n");

	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);
	register_netdevice_notifier(&xfrm_dev_notifier);
}

void __init xfrm_init(void)
{
	xfrm_state_init();
	xfrm_policy_init();
	xfrm_input_init();
}