/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <net/xfrm.h>
#include <net/ip.h>

DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_mutex);

static DEFINE_RWLOCK(xfrm_policy_lock);

struct xfrm_policy *xfrm_policy_list[XFRM_POLICY_MAX*2];
EXPORT_SYMBOL(xfrm_policy_list);

static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];

static kmem_cache_t *xfrm_dst_cache __read_mostly;

static struct work_struct xfrm_policy_gc_work;
static struct list_head xfrm_policy_gc_list =
	LIST_HEAD_INIT(xfrm_policy_gc_list);
static DEFINE_SPINLOCK(xfrm_policy_gc_lock);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
static struct xfrm_policy_afinfo *xfrm_policy_lock_afinfo(unsigned int family);
static void xfrm_policy_unlock_afinfo(struct xfrm_policy_afinfo *afinfo);

int xfrm_register_type(struct xfrm_type *type, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_lock_afinfo(family);
	struct xfrm_type **typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;

	if (likely(typemap[type->proto] == NULL))
		typemap[type->proto] = type;
	else
		err = -EEXIST;
	xfrm_policy_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_type);

int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_lock_afinfo(family);
	struct xfrm_type **typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;

	if (unlikely(typemap[type->proto] != type))
		err = -ENOENT;
	else
		typemap[type->proto] = NULL;
	xfrm_policy_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_type);

struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_type **typemap;
	struct xfrm_type *type;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;
	typemap = afinfo->type_map;

	type = typemap[proto];
	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;
	if (!type && !modload_attempted) {
		xfrm_policy_put_afinfo(afinfo);
		request_module("xfrm-type-%d-%d",
			       (int) family, (int) proto);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_policy_put_afinfo(afinfo);
	return type;
}
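
/* Route lookup dispatcher: hands the flow to the address family's
 * dst_lookup() hook, if the family registered one. */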
int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl,
		    unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	if (likely(afinfo->dst_lookup != NULL))
		err = afinfo->dst_lookup(dst, fl);
	else
		err = -EINVAL;
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_dst_lookup);

void xfrm_put_type(struct xfrm_type *type)
{
	module_put(type->owner);
}

int xfrm_register_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_policy_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -EEXIST;
	modemap = afinfo->mode_map;
	if (likely(modemap[mode->encap] == NULL)) {
		modemap[mode->encap] = mode;
		err = 0;
	}

	xfrm_policy_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_mode);

int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_policy_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -ENOENT;
	modemap = afinfo->mode_map;
	if (likely(modemap[mode->encap] == mode)) {
		modemap[mode->encap] = NULL;
		err = 0;
	}

	xfrm_policy_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_mode);

struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_mode *mode;
	int modload_attempted = 0;

	if (unlikely(encap >= XFRM_MODE_MAX))
		return NULL;

retry:
	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;

	mode = afinfo->mode_map[encap];
	if (unlikely(mode && !try_module_get(mode->owner)))
		mode = NULL;
	if (!mode && !modload_attempted) {
		xfrm_policy_put_afinfo(afinfo);
		request_module("xfrm-mode-%d-%d", family, encap);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_policy_put_afinfo(afinfo);
	return mode;
}

void xfrm_put_mode(struct xfrm_mode *mode)
{
	module_put(mode->owner);
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
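
/* Per-policy lifetime timer: reports soft expiry to the key managers
 * via km_policy_expired() and rearms itself, or deletes the policy
 * outright once a hard limit is hit. */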
static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy*)data;
	unsigned long now = (unsigned long)xtime.tv_sec;
	long next = LONG_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (xp->dead)
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}


/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kmalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		memset(policy, 0, sizeof(struct xfrm_policy));
		atomic_set(&policy->refcnt, 1);
		rwlock_init(&policy->lock);
		init_timer(&policy->timer);
		policy->timer.data = (unsigned long)policy;
		policy->timer.function = xfrm_policy_timer;
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

/* Destroy xfrm_policy: descendant resources must already have been released. */

void __xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->dead);

	BUG_ON(policy->bundles);

	if (del_timer(&policy->timer))
		BUG();

	security_xfrm_policy_free(policy);
	kfree(policy);
}
EXPORT_SYMBOL(__xfrm_policy_destroy);

static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
{
	struct dst_entry *dst;

	while ((dst = policy->bundles) != NULL) {
		policy->bundles = dst->next;
		dst_free(dst);
	}

	if (del_timer(&policy->timer))
		atomic_dec(&policy->refcnt);

	if (atomic_read(&policy->refcnt) > 1)
		flow_cache_flush();

	xfrm_pol_put(policy);
}

static void xfrm_policy_gc_task(void *data)
{
	struct xfrm_policy *policy;
	struct list_head *entry, *tmp;
	struct list_head gc_list = LIST_HEAD_INIT(gc_list);

	spin_lock_bh(&xfrm_policy_gc_lock);
	list_splice_init(&xfrm_policy_gc_list, &gc_list);
	spin_unlock_bh(&xfrm_policy_gc_lock);

	list_for_each_safe(entry, tmp, &gc_list) {
		policy = list_entry(entry, struct xfrm_policy, list);
		xfrm_policy_gc_kill(policy);
	}
}

/* Rule must be locked. Release descendant resources, announce
 * the entry dead. The rule must already be unlinked from all lists.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	int dead;

	write_lock_bh(&policy->lock);
	dead = policy->dead;
	policy->dead = 1;
	write_unlock_bh(&policy->lock);

	if (unlikely(dead)) {
		WARN_ON(1);
		return;
	}

	spin_lock(&xfrm_policy_gc_lock);
	list_add(&policy->list, &xfrm_policy_gc_list);
	spin_unlock(&xfrm_policy_gc_lock);

	schedule_work(&xfrm_policy_gc_work);
}
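
/* Policy indices carry the direction in their low three bits
 * (cf. xfrm_policy_id2dir()), which is why the generator below
 * advances in steps of 8. */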
/* Generate new index... KAME seems to generate them ordered by cost
 * of an absolute unpredictability of ordering of rules. This will not pass. */
static u32 xfrm_gen_index(int dir)
{
	u32 idx;
	struct xfrm_policy *p;
	static u32 idx_generator;

	for (;;) {
		idx = (idx_generator | dir);
		idx_generator += 8;
		if (idx == 0)
			idx = 8;
		for (p = xfrm_policy_list[dir]; p; p = p->next) {
			if (p->index == idx)
				break;
		}
		if (!p)
			return idx;
	}
}

int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct xfrm_policy *pol, **p;
	struct xfrm_policy *delpol = NULL;
	struct xfrm_policy **newpos = NULL;
	struct dst_entry *gc_list;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL;) {
		if (!delpol && memcmp(&policy->selector, &pol->selector, sizeof(pol->selector)) == 0 &&
		    xfrm_sec_ctx_match(pol->security, policy->security)) {
			if (excl) {
				write_unlock_bh(&xfrm_policy_lock);
				return -EEXIST;
			}
			*p = pol->next;
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			p = &pol->next;
			continue;
		}
		if (!newpos)
			newpos = p;
		if (delpol)
			break;
		p = &pol->next;
	}
	if (newpos)
		p = newpos;
	xfrm_pol_hold(policy);
	policy->next = *p;
	*p = policy;
	atomic_inc(&flow_cache_genid);
	policy->index = delpol ? delpol->index : xfrm_gen_index(dir);
	policy->curlft.add_time = (unsigned long)xtime.tv_sec;
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	write_unlock_bh(&xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);

	read_lock_bh(&xfrm_policy_lock);
	gc_list = NULL;
	for (policy = policy->next; policy; policy = policy->next) {
		struct dst_entry *dst;

		write_lock(&policy->lock);
		dst = policy->bundles;
		if (dst) {
			struct dst_entry *tail = dst;
			while (tail->next)
				tail = tail->next;
			tail->next = gc_list;
			gc_list = dst;

			policy->bundles = NULL;
		}
		write_unlock(&policy->lock);
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		struct dst_entry *dst = gc_list;

		gc_list = dst->next;
		dst_free(dst);
	}

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

struct xfrm_policy *xfrm_policy_bysel_ctx(int dir, struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete)
{
	struct xfrm_policy *pol, **p;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL; p = &pol->next) {
		if ((memcmp(sel, &pol->selector, sizeof(*sel)) == 0) &&
		    (xfrm_sec_ctx_match(ctx, pol->security))) {
			xfrm_pol_hold(pol);
			if (delete)
				*p = pol->next;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (pol && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
	}
	return pol;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
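
/* Look up a policy by index; with 'delete' set, also unlink it and
 * schedule it for destruction. */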
struct xfrm_policy *xfrm_policy_byid(int dir, u32 id, int delete)
{
	struct xfrm_policy *pol, **p;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL; p = &pol->next) {
		if (pol->index == id) {
			xfrm_pol_hold(pol);
			if (delete)
				*p = pol->next;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (pol && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
	}
	return pol;
}
EXPORT_SYMBOL(xfrm_policy_byid);

void xfrm_policy_flush(void)
{
	struct xfrm_policy *xp;
	int dir;

	write_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		while ((xp = xfrm_policy_list[dir]) != NULL) {
			xfrm_policy_list[dir] = xp->next;
			write_unlock_bh(&xfrm_policy_lock);

			xfrm_policy_kill(xp);

			write_lock_bh(&xfrm_policy_lock);
		}
	}
	atomic_inc(&flow_cache_genid);
	write_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_flush);

int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *xp;
	int dir;
	int count = 0;
	int error = 0;

	read_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
		for (xp = xfrm_policy_list[dir]; xp; xp = xp->next)
			count++;
	}

	if (count == 0) {
		error = -ENOENT;
		goto out;
	}

	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
		for (xp = xfrm_policy_list[dir]; xp; xp = xp->next) {
			error = func(xp, dir%XFRM_POLICY_MAX, --count, data);
			if (error)
				goto out;
		}
	}

out:
	read_unlock_bh(&xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

/* Find policy to apply to this flow. */

static void xfrm_policy_lookup(struct flowi *fl, u32 sk_sid, u16 family, u8 dir,
			       void **objp, atomic_t **obj_refp)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	for (pol = xfrm_policy_list[dir]; pol; pol = pol->next) {
		struct xfrm_selector *sel = &pol->selector;
		int match;

		if (pol->family != family)
			continue;

		match = xfrm_selector_match(sel, fl, family);

		if (match) {
			if (!security_xfrm_policy_lookup(pol, sk_sid, dir)) {
				xfrm_pol_hold(pol);
				break;
			}
		}
	}
	read_unlock_bh(&xfrm_policy_lock);
	if ((*objp = (void *) pol) != NULL)
		*obj_refp = &pol->refcnt;
}

static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;
	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	};
}

static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl, u32 sk_sid)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		int match = xfrm_selector_match(&pol->selector, fl,
						sk->sk_family);
		int err = 0;

		if (match)
			err = security_xfrm_policy_lookup(pol, sk_sid, policy_to_flow_dir(dir));

		if (match && !err)
			xfrm_pol_hold(pol);
		else
			pol = NULL;
	}
	read_unlock_bh(&xfrm_policy_lock);
	return pol;
}

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	pol->next = xfrm_policy_list[dir];
	xfrm_policy_list[dir] = pol;
	xfrm_pol_hold(pol);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	struct xfrm_policy **polp;

	for (polp = &xfrm_policy_list[dir];
	     *polp != NULL; polp = &(*polp)->next) {
		if (*polp == pol) {
			*polp = pol->next;
			return pol;
		}
	}
	return NULL;
}
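
/* Unlink a policy and schedule its destruction. The flow cache
 * generation count is bumped only for the main policy directions,
 * which are the ones served from the flow cache; per-socket policies
 * (dir >= XFRM_POLICY_MAX) are looked up directly. */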
int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	write_lock_bh(&xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);
	if (pol) {
		if (dir < XFRM_POLICY_MAX)
			atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);

int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct xfrm_policy *old_pol;

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
	if (pol) {
		pol->curlft.add_time = (unsigned long)xtime.tv_sec;
		pol->index = xfrm_gen_index(XFRM_POLICY_MAX+dir);
		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
	}
	if (old_pol)
		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
	write_unlock_bh(&xfrm_policy_lock);

	if (old_pol) {
		xfrm_policy_kill(old_pol);
	}
	return 0;
}

static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(GFP_ATOMIC);

	if (newp) {
		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old, newp)) {
			kfree(newp);
			return NULL;	/* ENOMEM */
		}
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);
		xfrm_pol_put(newp);
	}
	return newp;
}

int __xfrm_sk_clone_policy(struct sock *sk)
{
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
		return -ENOMEM;
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
		return -ENOMEM;
	return 0;
}

/* Resolve list of templates for the flow, given policy. */

static int
xfrm_tmpl_resolve(struct xfrm_policy *policy, struct flowi *fl,
		  struct xfrm_state **xfrm,
		  unsigned short family)
{
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);

	for (nx=0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		}

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx>=0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}
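
/* The two wrappers below dispatch bundle lookup and bundle construction
 * to the address family's afinfo hooks (find_bundle/bundle_create). */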
/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static struct dst_entry *
xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
{
	struct dst_entry *x;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EINVAL);
	x = afinfo->find_bundle(fl, policy);
	xfrm_policy_put_afinfo(afinfo);
	return x;
}

/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... In short, bundle a bundle.
 */

static int
xfrm_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int nx,
		   struct flowi *fl, struct dst_entry **dst_p,
		   unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->bundle_create(policy, xfrm, nx, fl, dst_p);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}


static int stale_bundle(struct dst_entry *dst);

/* Main function: finds/creates a bundle for a given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
		struct sock *sk, int flags)
{
	struct xfrm_policy *policy;
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst, *dst_orig = *dst_p;
	int nx = 0;
	int err;
	u32 genid;
	u16 family;
	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
	u32 sk_sid = security_sk_sid(sk, fl, dir);
restart:
	genid = atomic_read(&flow_cache_genid);
	policy = NULL;
	if (sk && sk->sk_policy[1])
		policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, sk_sid);

	if (!policy) {
		/* To accelerate a bit...  */
		if ((dst_orig->flags & DST_NOXFRM) || !xfrm_policy_list[XFRM_POLICY_OUT])
			return 0;

		policy = flow_cache_lookup(fl, sk_sid, dst_orig->ops->family,
					   dir, xfrm_policy_lookup);
	}

	if (!policy)
		return 0;

	family = dst_orig->ops->family;
	policy->curlft.use_time = (unsigned long)xtime.tv_sec;

	switch (policy->action) {
	case XFRM_POLICY_BLOCK:
		/* Prohibit the flow */
		err = -EPERM;
		goto error;

	case XFRM_POLICY_ALLOW:
		if (policy->xfrm_nr == 0) {
			/* Flow passes untransformed. */
			xfrm_pol_put(policy);
			return 0;
		}

		/* Try to find a matching bundle.
		 *
		 * LATER: help from flow cache. It is optional, this
		 * is required only for output policy.
		 */
		dst = xfrm_find_bundle(fl, policy, family);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			goto error;
		}

		if (dst)
			break;

		nx = xfrm_tmpl_resolve(policy, fl, xfrm, family);

		if (unlikely(nx<0)) {
			err = nx;
			if (err == -EAGAIN && flags) {
				DECLARE_WAITQUEUE(wait, current);

				add_wait_queue(&km_waitq, &wait);
				set_current_state(TASK_INTERRUPTIBLE);
				schedule();
				set_current_state(TASK_RUNNING);
				remove_wait_queue(&km_waitq, &wait);

				nx = xfrm_tmpl_resolve(policy, fl, xfrm, family);

				if (nx == -EAGAIN && signal_pending(current)) {
					err = -ERESTART;
					goto error;
				}
				if (nx == -EAGAIN ||
				    genid != atomic_read(&flow_cache_genid)) {
					xfrm_pol_put(policy);
					goto restart;
				}
				err = nx;
			}
			if (err < 0)
				goto error;
		}
		if (nx == 0) {
			/* Flow passes untransformed. */
			xfrm_pol_put(policy);
			return 0;
		}
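
		/* We now hold a list of resolved states; build the bundle
		 * and attach it to the policy, rechecking that the policy
		 * is still alive and the bundle not stale meanwhile. */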
		dst = dst_orig;
		err = xfrm_bundle_create(policy, xfrm, nx, fl, &dst, family);

		if (unlikely(err)) {
			int i;
			for (i=0; i<nx; i++)
				xfrm_state_put(xfrm[i]);
			goto error;
		}

		write_lock_bh(&policy->lock);
		if (unlikely(policy->dead || stale_bundle(dst))) {
			/* Wow! While we worked on resolving, this
			 * policy has gone. Retry. It is not paranoia,
			 * we just cannot enlist new bundle to dead object.
			 * We can't enlist stale bundles either.
			 */
			write_unlock_bh(&policy->lock);
			if (dst)
				dst_free(dst);

			err = -EHOSTUNREACH;
			goto error;
		}
		dst->next = policy->bundles;
		policy->bundles = dst;
		dst_hold(dst);
		write_unlock_bh(&policy->lock);
	}
	*dst_p = dst;
	dst_release(dst_orig);
	xfrm_pol_put(policy);
	return 0;

error:
	dst_release(dst_orig);
	xfrm_pol_put(policy);
	*dst_p = NULL;
	return err;
}
EXPORT_SYMBOL(xfrm_lookup);

/* When skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we do this in a maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have their policy cached at them.
 */

static inline int
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		(tmpl->aalgos & (1<<x->props.aalgo)) &&
		!(x->props.mode && xfrm_state_addr_cmp(tmpl, x, family));
}

static inline int
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (!tmpl->mode)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;
		if (sp->xvec[idx]->props.mode)
			break;
	}
	return start;
}

int
xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl);
	xfrm_policy_put_afinfo(afinfo);
	return 0;
}
EXPORT_SYMBOL(xfrm_decode_session);

static inline int secpath_has_tunnel(struct sec_path *sp, int k)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode)
			return 1;
	}

	return 0;
}
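
/* Inbound policy check: verify that the transformations actually applied
 * to the packet (recorded in its sec_path) satisfy the templates of the
 * policy that matches the decoded flow. */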
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct xfrm_policy *pol;
	struct flowi fl;
	u8 fl_dir = policy_to_flow_dir(dir);
	u32 sk_sid;

	if (xfrm_decode_session(skb, &fl, family) < 0)
		return 0;
	nf_nat_decode_session(skb, &fl, family);

	sk_sid = security_sk_sid(sk, &fl, fl_dir);

	/* First, check used SAs against their selectors. */
	if (skb->sp) {
		int i;

		for (i=skb->sp->len-1; i>=0; i--) {
			struct xfrm_state *x = skb->sp->xvec[i];
			if (!xfrm_selector_match(&x->sel, &fl, family))
				return 0;
		}
	}

	pol = NULL;
	if (sk && sk->sk_policy[dir])
		pol = xfrm_sk_policy_lookup(sk, dir, &fl, sk_sid);

	if (!pol)
		pol = flow_cache_lookup(&fl, sk_sid, family, fl_dir,
					xfrm_policy_lookup);

	if (!pol)
		return !skb->sp || !secpath_has_tunnel(skb->sp, 0);

	pol->curlft.use_time = (unsigned long)xtime.tv_sec;

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find corresponding xfrm.
		 * Order is _important_. Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		for (i = pol->xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(pol->xfrm_vec+i, sp, k, family);
			if (k < 0)
				goto reject;
		}

		if (secpath_has_tunnel(sp, k))
			goto reject;

		xfrm_pol_put(pol);
		return 1;
	}

reject:
	xfrm_pol_put(pol);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);

int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct flowi fl;

	if (xfrm_decode_session(skb, &fl, family) < 0)
		return 0;

	return xfrm_lookup(&skb->dst, &fl, NULL, 0) == 0;
}
EXPORT_SYMBOL(__xfrm_route_forward);

static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* If it is marked obsolete, which is how we even get here,
	 * then we have purged it from the policy bundle list and we
	 * did that for a good reason.
	 */
	return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok((struct xfrm_dst *)dst, NULL, AF_UNSPEC);
}

void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = &loopback_dev;
		dev_hold(&loopback_dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);
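
/* The dst_ops callbacks below are installed as per-family defaults by
 * xfrm_policy_register_afinfo() further down. */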
static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such dst must be popped before it reaches the point
	 * of failure. */
	return;
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

static void xfrm_prune_bundles(int (*func)(struct dst_entry *))
{
	int i;
	struct xfrm_policy *pol;
	struct dst_entry *dst, **dstp, *gc_list = NULL;

	read_lock_bh(&xfrm_policy_lock);
	for (i=0; i<2*XFRM_POLICY_MAX; i++) {
		for (pol = xfrm_policy_list[i]; pol; pol = pol->next) {
			write_lock(&pol->lock);
			dstp = &pol->bundles;
			while ((dst=*dstp) != NULL) {
				if (func(dst)) {
					*dstp = dst->next;
					dst->next = gc_list;
					gc_list = dst;
				} else {
					dstp = &dst->next;
				}
			}
			write_unlock(&pol->lock);
		}
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		dst = gc_list;
		gc_list = dst->next;
		dst_free(dst);
	}
}

static int unused_bundle(struct dst_entry *dst)
{
	return !atomic_read(&dst->__refcnt);
}

static void __xfrm_garbage_collect(void)
{
	xfrm_prune_bundles(unused_bundle);
}

int xfrm_flush_bundles(void)
{
	xfrm_prune_bundles(stale_bundle);
	return 0;
}

static int always_true(struct dst_entry *dst)
{
	return 1;
}

void xfrm_flush_all_bundles(void)
{
	xfrm_prune_bundles(always_true);
}

void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst->metrics[RTAX_MTU-1] = pmtu;
	} while ((dst = dst->next));
}

EXPORT_SYMBOL(xfrm_init_pmtu);
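
/* Note: besides validating the chain, xfrm_bundle_ok() refreshes the
 * cached child/route MTUs and re-propagates RTAX_MTU through the
 * bundle when they have changed. */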
/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family)
{
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;
	u32 mtu;

	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;

	last = NULL;

	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family))
			return 0;
		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			last = xdst;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			last = xdst;
			xdst->route_mtu_cached = mtu;
		}

		dst = dst->child;
	} while (dst->xfrm);

	if (likely(!last))
		return 1;

	mtu = last->child_mtu_cached;
	for (;;) {
		dst = &last->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst->metrics[RTAX_MTU-1] = mtu;

		if (last == first)
			break;

		last = last->u.next;
		last->child_mtu_cached = mtu;
	}

	return 1;
}

EXPORT_SYMBOL(xfrm_bundle_ok);

int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = __xfrm_garbage_collect;
		xfrm_policy_afinfo[afinfo->family] = afinfo;
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);

int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else {
			struct dst_ops *dst_ops = afinfo->dst_ops;
			xfrm_policy_afinfo[afinfo->family] = NULL;
			dst_ops->kmem_cachep = NULL;
			dst_ops->check = NULL;
			dst_ops->negative_advice = NULL;
			dst_ops->link_failure = NULL;
			afinfo->garbage_collect = NULL;
		}
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);
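
/* Locking discipline for the helpers below: xfrm_policy_get_afinfo()
 * returns with the afinfo read lock still held on success (it is
 * dropped by xfrm_policy_put_afinfo()), and releases the lock itself
 * only when the lookup fails. The lock/unlock pair is the write-side
 * equivalent used by the registration paths. */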
static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_policy_afinfo_lock);
	return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	read_unlock(&xfrm_policy_afinfo_lock);
}

static struct xfrm_policy_afinfo *xfrm_policy_lock_afinfo(unsigned int family)
{
	struct xfrm_policy_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (unlikely(!afinfo))
		write_unlock_bh(&xfrm_policy_afinfo_lock);
	return afinfo;
}

static void xfrm_policy_unlock_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	write_unlock_bh(&xfrm_policy_afinfo_lock);
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_DOWN:
		xfrm_flush_bundles();
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	xfrm_dev_event,
	NULL,
	0
};

static void __init xfrm_policy_init(void)
{
	xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN,
					   NULL, NULL);
	if (!xfrm_dst_cache)
		panic("XFRM: failed to allocate xfrm_dst_cache\n");

	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);
	register_netdevice_notifier(&xfrm_dev_notifier);
}

void __init xfrm_init(void)
{
	xfrm_state_init();
	xfrm_policy_init();
	xfrm_input_init();
}