/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <net/xfrm.h>
#include <net/ip.h>

DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_mutex);

static DEFINE_RWLOCK(xfrm_policy_lock);

struct xfrm_policy *xfrm_policy_list[XFRM_POLICY_MAX*2];
EXPORT_SYMBOL(xfrm_policy_list);

static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];

static kmem_cache_t *xfrm_dst_cache __read_mostly;

static struct work_struct xfrm_policy_gc_work;
static struct list_head xfrm_policy_gc_list =
	LIST_HEAD_INIT(xfrm_policy_gc_list);
static DEFINE_SPINLOCK(xfrm_policy_gc_lock);

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
static struct xfrm_policy_afinfo *xfrm_policy_lock_afinfo(unsigned int family);
static void xfrm_policy_unlock_afinfo(struct xfrm_policy_afinfo *afinfo);

int xfrm_register_type(struct xfrm_type *type, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_lock_afinfo(family);
	struct xfrm_type **typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;

	if (likely(typemap[type->proto] == NULL))
		typemap[type->proto] = type;
	else
		err = -EEXIST;
	xfrm_policy_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_type);

int xfrm_unregister_type(struct xfrm_type *type, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_lock_afinfo(family);
	struct xfrm_type **typemap;
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;
	typemap = afinfo->type_map;

	if (unlikely(typemap[type->proto] != type))
		err = -ENOENT;
	else
		typemap[type->proto] = NULL;
	xfrm_policy_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_type);

struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_type **typemap;
	struct xfrm_type *type;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;
	typemap = afinfo->type_map;

	type = typemap[proto];
	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;
	if (!type && !modload_attempted) {
		xfrm_policy_put_afinfo(afinfo);
		request_module("xfrm-type-%d-%d",
			       (int) family, (int) proto);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_policy_put_afinfo(afinfo);
	return type;
}

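/* Module autoloading: xfrm_get_type() above falls back to
 * request_module("xfrm-type-%d-%d", family, proto), so a type
 * implementation is expected to advertise a matching alias.  A minimal,
 * illustrative sketch of what an ESP-over-IPv4 module might do (the
 * field values here are assumptions for illustration, not copied from
 * any particular module):
 *
 *	static struct xfrm_type esp_type = {
 *		.description	= "ESP4",
 *		.owner		= THIS_MODULE,
 *		.proto		= IPPROTO_ESP,
 *	};
 *
 *	MODULE_ALIAS("xfrm-type-2-50");		(AF_INET = 2, IPPROTO_ESP = 50)
 *
 *	static int __init esp4_init(void)
 *	{
 *		return xfrm_register_type(&esp_type, AF_INET);
 *	}
 */
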
int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl,
		    unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	if (likely(afinfo->dst_lookup != NULL))
		err = afinfo->dst_lookup(dst, fl);
	else
		err = -EINVAL;
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_dst_lookup);

void xfrm_put_type(struct xfrm_type *type)
{
	module_put(type->owner);
}

int xfrm_register_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_policy_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -EEXIST;
	modemap = afinfo->mode_map;
	if (likely(modemap[mode->encap] == NULL)) {
		modemap[mode->encap] = mode;
		err = 0;
	}

	xfrm_policy_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_register_mode);

int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_mode **modemap;
	int err;

	if (unlikely(mode->encap >= XFRM_MODE_MAX))
		return -EINVAL;

	afinfo = xfrm_policy_lock_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	err = -ENOENT;
	modemap = afinfo->mode_map;
	if (likely(modemap[mode->encap] == mode)) {
		modemap[mode->encap] = NULL;
		err = 0;
	}

	xfrm_policy_unlock_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(xfrm_unregister_mode);

struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct xfrm_mode *mode;
	int modload_attempted = 0;

	if (unlikely(encap >= XFRM_MODE_MAX))
		return NULL;

retry:
	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;

	mode = afinfo->mode_map[encap];
	if (unlikely(mode && !try_module_get(mode->owner)))
		mode = NULL;
	if (!mode && !modload_attempted) {
		xfrm_policy_put_afinfo(afinfo);
		request_module("xfrm-mode-%d-%d", family, encap);
		modload_attempted = 1;
		goto retry;
	}

	xfrm_policy_put_afinfo(afinfo);
	return mode;
}

void xfrm_put_mode(struct xfrm_mode *mode)
{
	module_put(mode->owner);
}

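/* Modes autoload the same way as types: xfrm_get_mode() uses
 * request_module("xfrm-mode-%d-%d", family, encap), so a mode module
 * should carry the matching alias, e.g. (illustrative sketch, assuming
 * AF_INET = 2 and XFRM_MODE_TUNNEL = 1):
 *
 *	MODULE_ALIAS("xfrm-mode-2-1");
 */
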
static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}

static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy*)data;
	unsigned long now = (unsigned long)xtime.tv_sec;
	long next = LONG_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (xp->dead)
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}


/* Allocate xfrm_policy. Not used here; it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kmalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		memset(policy, 0, sizeof(struct xfrm_policy));
		atomic_set(&policy->refcnt, 1);
		rwlock_init(&policy->lock);
		init_timer(&policy->timer);
		policy->timer.data = (unsigned long)policy;
		policy->timer.function = xfrm_policy_timer;
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

/* Destroy xfrm_policy: descendant resources must already have been
 * released by this moment.
 */

void __xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->dead);

	BUG_ON(policy->bundles);

	if (del_timer(&policy->timer))
		BUG();

	security_xfrm_policy_free(policy);
	kfree(policy);
}
EXPORT_SYMBOL(__xfrm_policy_destroy);

static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
{
	struct dst_entry *dst;

	while ((dst = policy->bundles) != NULL) {
		policy->bundles = dst->next;
		dst_free(dst);
	}

	if (del_timer(&policy->timer))
		atomic_dec(&policy->refcnt);

	if (atomic_read(&policy->refcnt) > 1)
		flow_cache_flush();

	xfrm_pol_put(policy);
}

static void xfrm_policy_gc_task(void *data)
{
	struct xfrm_policy *policy;
	struct list_head *entry, *tmp;
	struct list_head gc_list = LIST_HEAD_INIT(gc_list);

	spin_lock_bh(&xfrm_policy_gc_lock);
	list_splice_init(&xfrm_policy_gc_list, &gc_list);
	spin_unlock_bh(&xfrm_policy_gc_lock);

	list_for_each_safe(entry, tmp, &gc_list) {
		policy = list_entry(entry, struct xfrm_policy, list);
		xfrm_policy_gc_kill(policy);
	}
}

/* Release descendant resources and announce the entry dead. The rule
 * must already have been unlinked from the lists by this point.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	int dead;

	write_lock_bh(&policy->lock);
	dead = policy->dead;
	policy->dead = 1;
	write_unlock_bh(&policy->lock);

	if (unlikely(dead)) {
		WARN_ON(1);
		return;
	}

	spin_lock(&xfrm_policy_gc_lock);
	list_add(&policy->list, &xfrm_policy_gc_list);
	spin_unlock(&xfrm_policy_gc_lock);

	schedule_work(&xfrm_policy_gc_work);
}

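/* To summarize the teardown path above: xfrm_policy_kill() marks the
 * policy dead and queues it on xfrm_policy_gc_list; xfrm_policy_gc_task()
 * later splices that list off under xfrm_policy_gc_lock and hands each
 * entry to xfrm_policy_gc_kill(), which frees the cached bundles, stops
 * the timer and drops the reference.
 */
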
/* Generate a new index... KAME seems to generate them ordered by cost, at
 * the price of absolute unpredictability of rule ordering. That will not
 * do here.
 */
static u32 xfrm_gen_index(int dir)
{
	u32 idx;
	struct xfrm_policy *p;
	static u32 idx_generator;

	for (;;) {
		idx = (idx_generator | dir);
		idx_generator += 8;
		if (idx == 0)
			idx = 8;
		for (p = xfrm_policy_list[dir]; p; p = p->next) {
			if (p->index == idx)
				break;
		}
		if (!p)
			return idx;
	}
}

int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct xfrm_policy *pol, **p;
	struct xfrm_policy *delpol = NULL;
	struct xfrm_policy **newpos = NULL;
	struct dst_entry *gc_list;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[dir]; (pol = *p) != NULL;) {
		if (!delpol && memcmp(&policy->selector, &pol->selector, sizeof(pol->selector)) == 0 &&
		    xfrm_sec_ctx_match(pol->security, policy->security)) {
			if (excl) {
				write_unlock_bh(&xfrm_policy_lock);
				return -EEXIST;
			}
			*p = pol->next;
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			p = &pol->next;
			continue;
		}
		if (!newpos)
			newpos = p;
		if (delpol)
			break;
		p = &pol->next;
	}
	if (newpos)
		p = newpos;
	xfrm_pol_hold(policy);
	policy->next = *p;
	*p = policy;
	atomic_inc(&flow_cache_genid);
	policy->index = delpol ? delpol->index : xfrm_gen_index(dir);
	policy->curlft.add_time = (unsigned long)xtime.tv_sec;
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	write_unlock_bh(&xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);

	read_lock_bh(&xfrm_policy_lock);
	gc_list = NULL;
	for (policy = policy->next; policy; policy = policy->next) {
		struct dst_entry *dst;

		write_lock(&policy->lock);
		dst = policy->bundles;
		if (dst) {
			struct dst_entry *tail = dst;
			while (tail->next)
				tail = tail->next;
			tail->next = gc_list;
			gc_list = dst;

			policy->bundles = NULL;
		}
		write_unlock(&policy->lock);
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		struct dst_entry *dst = gc_list;

		gc_list = dst->next;
		dst_free(dst);
	}

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

struct xfrm_policy *xfrm_policy_bysel_ctx(int dir, struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete)
{
	struct xfrm_policy *pol, **p;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[dir]; (pol = *p) != NULL; p = &pol->next) {
		if ((memcmp(sel, &pol->selector, sizeof(*sel)) == 0) &&
		    (xfrm_sec_ctx_match(ctx, pol->security))) {
			xfrm_pol_hold(pol);
			if (delete)
				*p = pol->next;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (pol && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
	}
	return pol;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(int dir, u32 id, int delete)
{
	struct xfrm_policy *pol, **p;

	write_lock_bh(&xfrm_policy_lock);
	for (p = &xfrm_policy_list[dir]; (pol = *p) != NULL; p = &pol->next) {
		if (pol->index == id) {
			xfrm_pol_hold(pol);
			if (delete)
				*p = pol->next;
			break;
		}
	}
	write_unlock_bh(&xfrm_policy_lock);

	if (pol && delete) {
		atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
	}
	return pol;
}
EXPORT_SYMBOL(xfrm_policy_byid);

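/* Both lookup helpers above return the policy with its refcount held;
 * the caller must drop it with xfrm_pol_put().  With delete != 0 the
 * entry is also unlinked and scheduled for destruction, which is the
 * building block for SPD delete operations.  Illustrative caller sketch
 * ("index" is a hypothetical variable):
 *
 *	struct xfrm_policy *xp;
 *
 *	xp = xfrm_policy_byid(XFRM_POLICY_OUT, index, 1);
 *	if (xp != NULL)
 *		xfrm_pol_put(xp);
 */
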
void xfrm_policy_flush(void)
{
	struct xfrm_policy *xp;
	int dir;

	write_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		while ((xp = xfrm_policy_list[dir]) != NULL) {
			xfrm_policy_list[dir] = xp->next;
			write_unlock_bh(&xfrm_policy_lock);

			xfrm_policy_kill(xp);

			write_lock_bh(&xfrm_policy_lock);
		}
	}
	atomic_inc(&flow_cache_genid);
	write_unlock_bh(&xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_flush);

int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *xp;
	int dir;
	int count = 0;
	int error = 0;

	read_lock_bh(&xfrm_policy_lock);
	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
		for (xp = xfrm_policy_list[dir]; xp; xp = xp->next)
			count++;
	}

	if (count == 0) {
		error = -ENOENT;
		goto out;
	}

	for (dir = 0; dir < 2*XFRM_POLICY_MAX; dir++) {
		for (xp = xfrm_policy_list[dir]; xp; xp = xp->next) {
			error = func(xp, dir%XFRM_POLICY_MAX, --count, data);
			if (error)
				goto out;
		}
	}

out:
	read_unlock_bh(&xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

/* Find policy to apply to this flow. */

static void xfrm_policy_lookup(struct flowi *fl, u32 sk_sid, u16 family, u8 dir,
			       void **objp, atomic_t **obj_refp)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	for (pol = xfrm_policy_list[dir]; pol; pol = pol->next) {
		struct xfrm_selector *sel = &pol->selector;
		int match;

		if (pol->family != family)
			continue;

		match = xfrm_selector_match(sel, fl, family);

		if (match) {
			if (!security_xfrm_policy_lookup(pol, sk_sid, dir)) {
				xfrm_pol_hold(pol);
				break;
			}
		}
	}
	read_unlock_bh(&xfrm_policy_lock);
	if ((*objp = (void *) pol) != NULL)
		*obj_refp = &pol->refcnt;
}

static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;
	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	}
}

static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, struct flowi *fl, u32 sk_sid)
{
	struct xfrm_policy *pol;

	read_lock_bh(&xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		int match = xfrm_selector_match(&pol->selector, fl,
						sk->sk_family);
		int err = 0;

		if (match)
			err = security_xfrm_policy_lookup(pol, sk_sid, policy_to_flow_dir(dir));

		if (match && !err)
			xfrm_pol_hold(pol);
		else
			pol = NULL;
	}
	read_unlock_bh(&xfrm_policy_lock);
	return pol;
}

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	pol->next = xfrm_policy_list[dir];
	xfrm_policy_list[dir] = pol;
	xfrm_pol_hold(pol);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	struct xfrm_policy **polp;

	for (polp = &xfrm_policy_list[dir];
	     *polp != NULL; polp = &(*polp)->next) {
		if (*polp == pol) {
			*polp = pol->next;
			return pol;
		}
	}
	return NULL;
}

int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	write_lock_bh(&xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&xfrm_policy_lock);
	if (pol) {
		if (dir < XFRM_POLICY_MAX)
			atomic_inc(&flow_cache_genid);
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);

int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct xfrm_policy *old_pol;

	write_lock_bh(&xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
	if (pol) {
		pol->curlft.add_time = (unsigned long)xtime.tv_sec;
		pol->index = xfrm_gen_index(XFRM_POLICY_MAX+dir);
		__xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
	}
	if (old_pol)
		__xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
	write_unlock_bh(&xfrm_policy_lock);

	if (old_pol) {
		xfrm_policy_kill(old_pol);
	}
	return 0;
}

static struct xfrm_policy *clone_policy(struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(GFP_ATOMIC);

	if (newp) {
		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old, newp)) {
			kfree(newp);
			return NULL;  /* ENOMEM */
		}
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&xfrm_policy_lock);
		__xfrm_policy_link(newp, XFRM_POLICY_MAX+dir);
		write_unlock_bh(&xfrm_policy_lock);
		xfrm_pol_put(newp);
	}
	return newp;
}

int __xfrm_sk_clone_policy(struct sock *sk)
{
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
		return -ENOMEM;
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
		return -ENOMEM;
	return 0;
}

/* Resolve list of templates for the flow, given policy. */

static int
xfrm_tmpl_resolve(struct xfrm_policy *policy, struct flowi *fl,
		  struct xfrm_state **xfrm,
		  unsigned short family)
{
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);

	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		}

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx >= 0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}

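/* Note how addresses chain through xfrm_tmpl_resolve(): a tunnel-mode
 * template (tmpl->mode != 0) takes its endpoints from the template
 * itself, and once its state is found those endpoints become the
 * daddr/saddr used for the remaining templates; transport-mode
 * templates keep the current flow addresses.
 */
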
/* Find a bundle that accepts the flow and whose components are
 * still valid.
 */

static struct dst_entry *
xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
{
	struct dst_entry *x;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EINVAL);
	x = afinfo->find_bundle(fl, policy);
	xfrm_policy_put_afinfo(afinfo);
	return x;
}

/* Allocate a chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... In short, build a bundle.
 */

static int
xfrm_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int nx,
		   struct flowi *fl, struct dst_entry **dst_p,
		   unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->bundle_create(policy, xfrm, nx, fl, dst_p);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}


static int stale_bundle(struct dst_entry *dst);

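/* An illustrative sketch of how the output path is expected to use
 * xfrm_lookup() below (hand-written here, not a verbatim caller):
 * routing resolves a plain route first, and xfrm then substitutes a
 * bundle for it when policy requires transformation:
 *
 *	struct dst_entry *dst;	(already holds the raw route)
 *	int err;
 *
 *	err = xfrm_lookup(&dst, &fl, sk, 0);
 *	if (err == 0)
 *		... transmit using dst, which may now be a bundle ...
 */
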
/* Main function: finds/creates a bundle for the given flow.
 *
 * At the moment we eat a raw IP route. Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
int xfrm_lookup(struct dst_entry **dst_p, struct flowi *fl,
		struct sock *sk, int flags)
{
	struct xfrm_policy *policy;
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst, *dst_orig = *dst_p;
	int nx = 0;
	int err;
	u32 genid;
	u16 family;
	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
	u32 sk_sid = security_sk_sid(sk, fl, dir);
restart:
	genid = atomic_read(&flow_cache_genid);
	policy = NULL;
	if (sk && sk->sk_policy[1])
		policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, sk_sid);

	if (!policy) {
		/* To accelerate a bit... */
		if ((dst_orig->flags & DST_NOXFRM) || !xfrm_policy_list[XFRM_POLICY_OUT])
			return 0;

		policy = flow_cache_lookup(fl, sk_sid, dst_orig->ops->family,
					   dir, xfrm_policy_lookup);
	}

	if (!policy)
		return 0;

	family = dst_orig->ops->family;
	policy->curlft.use_time = (unsigned long)xtime.tv_sec;

	switch (policy->action) {
	case XFRM_POLICY_BLOCK:
		/* Prohibit the flow */
		err = -EPERM;
		goto error;

	case XFRM_POLICY_ALLOW:
		if (policy->xfrm_nr == 0) {
			/* Flow passes untransformed. */
			xfrm_pol_put(policy);
			return 0;
		}

		/* Try to find a matching bundle.
		 *
		 * LATER: help from flow cache. It is optional, this
		 * is required only for output policy.
		 */
		dst = xfrm_find_bundle(fl, policy, family);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			goto error;
		}

		if (dst)
			break;

		nx = xfrm_tmpl_resolve(policy, fl, xfrm, family);

		if (unlikely(nx < 0)) {
			err = nx;
			if (err == -EAGAIN && flags) {
				DECLARE_WAITQUEUE(wait, current);

				add_wait_queue(&km_waitq, &wait);
				set_current_state(TASK_INTERRUPTIBLE);
				schedule();
				set_current_state(TASK_RUNNING);
				remove_wait_queue(&km_waitq, &wait);

				nx = xfrm_tmpl_resolve(policy, fl, xfrm, family);

				if (nx == -EAGAIN && signal_pending(current)) {
					err = -ERESTART;
					goto error;
				}
				if (nx == -EAGAIN ||
				    genid != atomic_read(&flow_cache_genid)) {
					xfrm_pol_put(policy);
					goto restart;
				}
				err = nx;
			}
			if (err < 0)
				goto error;
		}
		if (nx == 0) {
			/* Flow passes untransformed. */
			xfrm_pol_put(policy);
			return 0;
		}

		dst = dst_orig;
		err = xfrm_bundle_create(policy, xfrm, nx, fl, &dst, family);

		if (unlikely(err)) {
			int i;
			for (i = 0; i < nx; i++)
				xfrm_state_put(xfrm[i]);
			goto error;
		}

		write_lock_bh(&policy->lock);
		if (unlikely(policy->dead || stale_bundle(dst))) {
			/* Wow! While we worked on resolving, this
			 * policy has gone. Retry. It is not paranoia,
			 * we just cannot enlist a new bundle to a dead
			 * object. We can't enlist stale bundles either.
			 */
			write_unlock_bh(&policy->lock);
			if (dst)
				dst_free(dst);

			err = -EHOSTUNREACH;
			goto error;
		}
		dst->next = policy->bundles;
		policy->bundles = dst;
		dst_hold(dst);
		write_unlock_bh(&policy->lock);
	}
	*dst_p = dst;
	dst_release(dst_orig);
	xfrm_pol_put(policy);
	return 0;

error:
	dst_release(dst_orig);
	xfrm_pol_put(policy);
	*dst_p = NULL;
	return err;
}
EXPORT_SYMBOL(xfrm_lookup);

/* When the skb is transformed back to its "native" form, we have to
 * check policy restrictions. At the moment we do this in a maximally
 * stupid way. Shame on me. :-) Of course, connected sockets must
 * have the policy cached at them.
 */

static inline int
xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		(tmpl->aalgos & (1<<x->props.aalgo)) &&
		!(x->props.mode && xfrm_state_addr_cmp(tmpl, x, family));
}

static inline int
xfrm_policy_ok(struct xfrm_tmpl *tmpl, struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (!tmpl->mode)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;
		if (sp->xvec[idx]->props.mode)
			break;
	}
	return start;
}

int
xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl);
	xfrm_policy_put_afinfo(afinfo);
	return 0;
}
EXPORT_SYMBOL(xfrm_decode_session);

static inline int secpath_has_tunnel(struct sec_path *sp, int k)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode)
			return 1;
	}

	return 0;
}

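/* __xfrm_policy_check() below is normally reached through per-family
 * inline wrappers (xfrm4_policy_check() and friends in net/xfrm.h),
 * which avoid the full check when there are no policies and no
 * security path attached to the skb.
 */
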
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct xfrm_policy *pol;
	struct flowi fl;
	u8 fl_dir = policy_to_flow_dir(dir);
	u32 sk_sid;

	if (xfrm_decode_session(skb, &fl, family) < 0)
		return 0;
	nf_nat_decode_session(skb, &fl, family);

	sk_sid = security_sk_sid(sk, &fl, fl_dir);

	/* First, check the used SAs against their selectors. */
	if (skb->sp) {
		int i;

		for (i = skb->sp->len-1; i >= 0; i--) {
			struct xfrm_state *x = skb->sp->xvec[i];
			if (!xfrm_selector_match(&x->sel, &fl, family))
				return 0;
		}
	}

	pol = NULL;
	if (sk && sk->sk_policy[dir])
		pol = xfrm_sk_policy_lookup(sk, dir, &fl, sk_sid);

	if (!pol)
		pol = flow_cache_lookup(&fl, sk_sid, family, fl_dir,
					xfrm_policy_lookup);

	if (!pol)
		return !skb->sp || !secpath_has_tunnel(skb->sp, 0);

	pol->curlft.use_time = (unsigned long)xtime.tv_sec;

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find the corresponding xfrm.
		 * Order is _important_. Later we will implement some
		 * barriers, but at the moment barriers are implied
		 * between each two transformations.
		 */
		for (i = pol->xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(pol->xfrm_vec+i, sp, k, family);
			if (k < 0)
				goto reject;
		}

		if (secpath_has_tunnel(sp, k))
			goto reject;

		xfrm_pol_put(pol);
		return 1;
	}

reject:
	xfrm_pol_put(pol);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);

int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct flowi fl;

	if (xfrm_decode_session(skb, &fl, family) < 0)
		return 0;

	return xfrm_lookup(&skb->dst, &fl, NULL, 0) == 0;
}
EXPORT_SYMBOL(__xfrm_route_forward);

static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* If it is marked obsolete, which is how we even get here,
	 * then we have purged it from the policy bundle list and we
	 * did that for a good reason.
	 */
	return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok((struct xfrm_dst *)dst, NULL, AF_UNSPEC);
}

void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = &loopback_dev;
		dev_hold(&loopback_dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such a dst must be popped before it reaches the
	 * point of failure.
	 */
	return;
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

static void xfrm_prune_bundles(int (*func)(struct dst_entry *))
{
	int i;
	struct xfrm_policy *pol;
	struct dst_entry *dst, **dstp, *gc_list = NULL;

	read_lock_bh(&xfrm_policy_lock);
	for (i = 0; i < 2*XFRM_POLICY_MAX; i++) {
		for (pol = xfrm_policy_list[i]; pol; pol = pol->next) {
			write_lock(&pol->lock);
			dstp = &pol->bundles;
			while ((dst = *dstp) != NULL) {
				if (func(dst)) {
					*dstp = dst->next;
					dst->next = gc_list;
					gc_list = dst;
				} else {
					dstp = &dst->next;
				}
			}
			write_unlock(&pol->lock);
		}
	}
	read_unlock_bh(&xfrm_policy_lock);

	while (gc_list) {
		dst = gc_list;
		gc_list = dst->next;
		dst_free(dst);
	}
}

static int unused_bundle(struct dst_entry *dst)
{
	return !atomic_read(&dst->__refcnt);
}

static void __xfrm_garbage_collect(void)
{
	xfrm_prune_bundles(unused_bundle);
}

int xfrm_flush_bundles(void)
{
	xfrm_prune_bundles(stale_bundle);
	return 0;
}

static int always_true(struct dst_entry *dst)
{
	return 1;
}

void xfrm_flush_all_bundles(void)
{
	xfrm_prune_bundles(always_true);
}

void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst->metrics[RTAX_MTU-1] = pmtu;
	} while ((dst = dst->next));
}

EXPORT_SYMBOL(xfrm_init_pmtu);

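/* In other words, at every level of the bundle the usable MTU is the
 * child path MTU reduced by the transform overhead (xfrm_state_mtu())
 * and capped by the MTU of the route the transform rides on.
 */
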
1262 */ 1263 1264 int xfrm_bundle_ok(struct xfrm_dst *first, struct flowi *fl, int family) 1265 { 1266 struct dst_entry *dst = &first->u.dst; 1267 struct xfrm_dst *last; 1268 u32 mtu; 1269 1270 if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) || 1271 (dst->dev && !netif_running(dst->dev))) 1272 return 0; 1273 1274 last = NULL; 1275 1276 do { 1277 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 1278 1279 if (fl && !xfrm_selector_match(&dst->xfrm->sel, fl, family)) 1280 return 0; 1281 if (dst->xfrm->km.state != XFRM_STATE_VALID) 1282 return 0; 1283 1284 mtu = dst_mtu(dst->child); 1285 if (xdst->child_mtu_cached != mtu) { 1286 last = xdst; 1287 xdst->child_mtu_cached = mtu; 1288 } 1289 1290 if (!dst_check(xdst->route, xdst->route_cookie)) 1291 return 0; 1292 mtu = dst_mtu(xdst->route); 1293 if (xdst->route_mtu_cached != mtu) { 1294 last = xdst; 1295 xdst->route_mtu_cached = mtu; 1296 } 1297 1298 dst = dst->child; 1299 } while (dst->xfrm); 1300 1301 if (likely(!last)) 1302 return 1; 1303 1304 mtu = last->child_mtu_cached; 1305 for (;;) { 1306 dst = &last->u.dst; 1307 1308 mtu = xfrm_state_mtu(dst->xfrm, mtu); 1309 if (mtu > last->route_mtu_cached) 1310 mtu = last->route_mtu_cached; 1311 dst->metrics[RTAX_MTU-1] = mtu; 1312 1313 if (last == first) 1314 break; 1315 1316 last = last->u.next; 1317 last->child_mtu_cached = mtu; 1318 } 1319 1320 return 1; 1321 } 1322 1323 EXPORT_SYMBOL(xfrm_bundle_ok); 1324 1325 int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo) 1326 { 1327 int err = 0; 1328 if (unlikely(afinfo == NULL)) 1329 return -EINVAL; 1330 if (unlikely(afinfo->family >= NPROTO)) 1331 return -EAFNOSUPPORT; 1332 write_lock_bh(&xfrm_policy_afinfo_lock); 1333 if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL)) 1334 err = -ENOBUFS; 1335 else { 1336 struct dst_ops *dst_ops = afinfo->dst_ops; 1337 if (likely(dst_ops->kmem_cachep == NULL)) 1338 dst_ops->kmem_cachep = xfrm_dst_cache; 1339 if (likely(dst_ops->check == NULL)) 1340 dst_ops->check = xfrm_dst_check; 1341 if (likely(dst_ops->negative_advice == NULL)) 1342 dst_ops->negative_advice = xfrm_negative_advice; 1343 if (likely(dst_ops->link_failure == NULL)) 1344 dst_ops->link_failure = xfrm_link_failure; 1345 if (likely(afinfo->garbage_collect == NULL)) 1346 afinfo->garbage_collect = __xfrm_garbage_collect; 1347 xfrm_policy_afinfo[afinfo->family] = afinfo; 1348 } 1349 write_unlock_bh(&xfrm_policy_afinfo_lock); 1350 return err; 1351 } 1352 EXPORT_SYMBOL(xfrm_policy_register_afinfo); 1353 1354 int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo) 1355 { 1356 int err = 0; 1357 if (unlikely(afinfo == NULL)) 1358 return -EINVAL; 1359 if (unlikely(afinfo->family >= NPROTO)) 1360 return -EAFNOSUPPORT; 1361 write_lock_bh(&xfrm_policy_afinfo_lock); 1362 if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) { 1363 if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo)) 1364 err = -EINVAL; 1365 else { 1366 struct dst_ops *dst_ops = afinfo->dst_ops; 1367 xfrm_policy_afinfo[afinfo->family] = NULL; 1368 dst_ops->kmem_cachep = NULL; 1369 dst_ops->check = NULL; 1370 dst_ops->negative_advice = NULL; 1371 dst_ops->link_failure = NULL; 1372 afinfo->garbage_collect = NULL; 1373 } 1374 } 1375 write_unlock_bh(&xfrm_policy_afinfo_lock); 1376 return err; 1377 } 1378 EXPORT_SYMBOL(xfrm_policy_unregister_afinfo); 1379 1380 static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family) 1381 { 1382 struct xfrm_policy_afinfo *afinfo; 1383 if (unlikely(family >= NPROTO)) 1384 return NULL; 1385 
static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_policy_afinfo_lock);
	return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	read_unlock(&xfrm_policy_afinfo_lock);
}

static struct xfrm_policy_afinfo *xfrm_policy_lock_afinfo(unsigned int family)
{
	struct xfrm_policy_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (unlikely(!afinfo))
		write_unlock_bh(&xfrm_policy_afinfo_lock);
	return afinfo;
}

static void xfrm_policy_unlock_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	write_unlock_bh(&xfrm_policy_afinfo_lock);
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	switch (event) {
	case NETDEV_DOWN:
		xfrm_flush_bundles();
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call	= xfrm_dev_event,
};

static void __init xfrm_policy_init(void)
{
	xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN,
					   NULL, NULL);
	if (!xfrm_dst_cache)
		panic("XFRM: failed to allocate xfrm_dst_cache\n");

	INIT_WORK(&xfrm_policy_gc_work, xfrm_policy_gc_task, NULL);
	register_netdevice_notifier(&xfrm_dev_notifier);
}

void __init xfrm_init(void)
{
	xfrm_state_init();
	xfrm_policy_init();
	xfrm_input_init();
}