/*
 * xfrm_policy.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	Kazunori MIYAZAWA @USAGI
 *	YOSHIFUJI Hideaki
 *		Split up af-specific portion
 *	Derek Atkins <derek@ihtfp.com>		Add the post_input processor
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/xfrm.h>
#include <net/ip.h>
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif

#include "xfrm_hash.h"

#define XFRM_QUEUE_TMO_MIN ((unsigned)(HZ/10))
#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
#define XFRM_MAX_QUEUE_LEN	100

struct xfrm_flo {
	struct dst_entry *dst_orig;
	u8 flags;
};

static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
						__read_mostly;

static struct kmem_cache *xfrm_dst_cache __read_mostly;

static void xfrm_init_pmtu(struct dst_entry *dst);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst);
static void xfrm_policy_queue_process(unsigned long arg);

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir);
static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir);

static inline bool
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	return  addr4_match(fl4->daddr, sel->daddr.a4, sel->prefixlen_d) &&
		addr4_match(fl4->saddr, sel->saddr.a4, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl4->flowi4_proto == sel->proto || !sel->proto) &&
		(fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}

static inline bool
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
		addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
		!((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
		!((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
		(fl6->flowi6_proto == sel->proto || !sel->proto) &&
		(fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}

bool xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
			 unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_selector_match(sel, fl);
	case AF_INET6:
		return __xfrm6_selector_match(sel, fl);
	}
	return false;
}

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;

	if (unlikely(family >= NPROTO))
		return NULL;
	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	rcu_read_unlock();
}
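/* Route lookup through the address-family-specific dst_lookup() hook.
 * Returns ERR_PTR(-EAFNOSUPPORT) when no afinfo is registered for the
 * family.
 */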
static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
						  const xfrm_address_t *saddr,
						  const xfrm_address_t *daddr,
						  int family)
{
	struct xfrm_policy_afinfo *afinfo;
	struct dst_entry *dst;

	afinfo = xfrm_policy_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return ERR_PTR(-EAFNOSUPPORT);

	dst = afinfo->dst_lookup(net, tos, saddr, daddr);

	xfrm_policy_put_afinfo(afinfo);

	return dst;
}

static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x, int tos,
						xfrm_address_t *prev_saddr,
						xfrm_address_t *prev_daddr,
						int family)
{
	struct net *net = xs_net(x);
	xfrm_address_t *saddr = &x->props.saddr;
	xfrm_address_t *daddr = &x->id.daddr;
	struct dst_entry *dst;

	if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) {
		saddr = x->coaddr;
		daddr = prev_daddr;
	}
	if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) {
		saddr = prev_saddr;
		daddr = x->coaddr;
	}

	dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family);

	if (!IS_ERR(dst)) {
		if (prev_saddr != saddr)
			memcpy(prev_saddr, saddr, sizeof(*prev_saddr));
		if (prev_daddr != daddr)
			memcpy(prev_daddr, daddr, sizeof(*prev_daddr));
	}

	return dst;
}

static inline unsigned long make_jiffies(long secs)
{
	if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ)
		return MAX_SCHEDULE_TIMEOUT-1;
	else
		return secs*HZ;
}
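/* Per-policy lifetime timer. Soft expiry only notifies the key manager
 * and re-arms the timer with XFRM_KM_TIMEOUT; hard expiry deletes the
 * policy.
 */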
static void xfrm_policy_timer(unsigned long data)
{
	struct xfrm_policy *xp = (struct xfrm_policy *)data;
	unsigned long now = get_seconds();
	long next = LONG_MAX;
	int warn = 0;
	int dir;

	read_lock(&xp->lock);

	if (unlikely(xp->walk.dead))
		goto out;

	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.hard_use_expires_seconds) {
		long tmo = xp->lft.hard_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_add_expires_seconds) {
		long tmo = xp->lft.soft_add_expires_seconds +
			xp->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}
	if (xp->lft.soft_use_expires_seconds) {
		long tmo = xp->lft.soft_use_expires_seconds +
			(xp->curlft.use_time ? : xp->curlft.add_time) - now;
		if (tmo <= 0) {
			warn = 1;
			tmo = XFRM_KM_TIMEOUT;
		}
		if (tmo < next)
			next = tmo;
	}

	if (warn)
		km_policy_expired(xp, dir, 0, 0);
	if (next != LONG_MAX &&
	    !mod_timer(&xp->timer, jiffies + make_jiffies(next)))
		xfrm_pol_hold(xp);

out:
	read_unlock(&xp->lock);
	xfrm_pol_put(xp);
	return;

expired:
	read_unlock(&xp->lock);
	if (!xfrm_policy_delete(xp, dir))
		km_policy_expired(xp, dir, 1, 0);
	xfrm_pol_put(xp);
}

static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
{
	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);

	if (unlikely(pol->walk.dead))
		flo = NULL;
	else
		xfrm_pol_hold(pol);

	return flo;
}

static int xfrm_policy_flo_check(struct flow_cache_object *flo)
{
	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);

	return !pol->walk.dead;
}

static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
{
	xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));
}

static const struct flow_cache_ops xfrm_policy_fc_ops = {
	.get = xfrm_policy_flo_get,
	.check = xfrm_policy_flo_check,
	.delete = xfrm_policy_flo_delete,
};

/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
 * SPD calls.
 */

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
{
	struct xfrm_policy *policy;

	policy = kzalloc(sizeof(struct xfrm_policy), gfp);

	if (policy) {
		write_pnet(&policy->xp_net, net);
		INIT_LIST_HEAD(&policy->walk.all);
		INIT_HLIST_NODE(&policy->bydst);
		INIT_HLIST_NODE(&policy->byidx);
		rwlock_init(&policy->lock);
		atomic_set(&policy->refcnt, 1);
		skb_queue_head_init(&policy->polq.hold_queue);
		setup_timer(&policy->timer, xfrm_policy_timer,
				(unsigned long)policy);
		setup_timer(&policy->polq.hold_timer, xfrm_policy_queue_process,
			    (unsigned long)policy);
		policy->flo.ops = &xfrm_policy_fc_ops;
	}
	return policy;
}
EXPORT_SYMBOL(xfrm_policy_alloc);

/* Destroy xfrm_policy: descendant resources must be released to this moment. */

void xfrm_policy_destroy(struct xfrm_policy *policy)
{
	BUG_ON(!policy->walk.dead);

	if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
		BUG();

	security_xfrm_policy_free(policy->security);
	kfree(policy);
}
EXPORT_SYMBOL(xfrm_policy_destroy);
/* Rule must be locked. Release descendant resources, announce
 * entry dead. The rule must already be unlinked from the lists.
 */

static void xfrm_policy_kill(struct xfrm_policy *policy)
{
	policy->walk.dead = 1;

	atomic_inc(&policy->genid);

	if (del_timer(&policy->polq.hold_timer))
		xfrm_pol_put(policy);
	skb_queue_purge(&policy->polq.hold_queue);

	if (del_timer(&policy->timer))
		xfrm_pol_put(policy);

	xfrm_pol_put(policy);
}

static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;

static inline unsigned int idx_hash(struct net *net, u32 index)
{
	return __idx_hash(index, net->xfrm.policy_idx_hmask);
}

/* calculate policy hash thresholds */
static void __get_hash_thresh(struct net *net,
			      unsigned short family, int dir,
			      u8 *dbits, u8 *sbits)
{
	switch (family) {
	case AF_INET:
		*dbits = net->xfrm.policy_bydst[dir].dbits4;
		*sbits = net->xfrm.policy_bydst[dir].sbits4;
		break;

	case AF_INET6:
		*dbits = net->xfrm.policy_bydst[dir].dbits6;
		*sbits = net->xfrm.policy_bydst[dir].sbits6;
		break;

	default:
		*dbits = 0;
		*sbits = 0;
	}
}

static struct hlist_head *policy_hash_bysel(struct net *net,
					    const struct xfrm_selector *sel,
					    unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __sel_hash(sel, family, hmask, dbits, sbits);

	return (hash == hmask + 1 ?
		&net->xfrm.policy_inexact[dir] :
		net->xfrm.policy_bydst[dir].table + hash);
}

static struct hlist_head *policy_hash_direct(struct net *net,
					     const xfrm_address_t *daddr,
					     const xfrm_address_t *saddr,
					     unsigned short family, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int hash;
	u8 dbits;
	u8 sbits;

	__get_hash_thresh(net, family, dir, &dbits, &sbits);
	hash = __addr_hash(daddr, saddr, family, hmask, dbits, sbits);

	return net->xfrm.policy_bydst[dir].table + hash;
}

static void xfrm_dst_hash_transfer(struct net *net,
				   struct hlist_head *list,
				   struct hlist_head *ndsttable,
				   unsigned int nhashmask,
				   int dir)
{
	struct hlist_node *tmp, *entry0 = NULL;
	struct xfrm_policy *pol;
	unsigned int h0 = 0;
	u8 dbits;
	u8 sbits;

redo:
	hlist_for_each_entry_safe(pol, tmp, list, bydst) {
		unsigned int h;

		__get_hash_thresh(net, pol->family, dir, &dbits, &sbits);
		h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr,
				pol->family, nhashmask, dbits, sbits);
		if (!entry0) {
			hlist_del(&pol->bydst);
			hlist_add_head(&pol->bydst, ndsttable+h);
			h0 = h;
		} else {
			if (h != h0)
				continue;
			hlist_del(&pol->bydst);
			hlist_add_behind(&pol->bydst, entry0);
		}
		entry0 = &pol->bydst;
	}
	if (!hlist_empty(list)) {
		entry0 = NULL;
		goto redo;
	}
}

static void xfrm_idx_hash_transfer(struct hlist_head *list,
				   struct hlist_head *nidxtable,
				   unsigned int nhashmask)
{
	struct hlist_node *tmp;
	struct xfrm_policy *pol;

	hlist_for_each_entry_safe(pol, tmp, list, byidx) {
		unsigned int h;

		h = __idx_hash(pol->index, nhashmask);
		hlist_add_head(&pol->byidx, nidxtable+h);
	}
}

static unsigned long xfrm_new_hash_mask(unsigned int old_hmask)
{
	return ((old_hmask + 1) << 1) - 1;
}
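/* Double the bydst hash table for one direction and rehash every entry
 * under the policy lock; xfrm_dst_hash_transfer() keeps the relative
 * order of entries that land in the same new chain.
 */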
static void xfrm_bydst_resize(struct net *net, int dir)
{
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *odst = net->xfrm.policy_bydst[dir].table;
	struct hlist_head *ndst = xfrm_hash_alloc(nsize);
	int i;

	if (!ndst)
		return;

	write_lock_bh(&net->xfrm.xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_dst_hash_transfer(net, odst + i, ndst, nhashmask, dir);

	net->xfrm.policy_bydst[dir].table = ndst;
	net->xfrm.policy_bydst[dir].hmask = nhashmask;

	write_unlock_bh(&net->xfrm.xfrm_policy_lock);

	xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head));
}

static void xfrm_byidx_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;
	unsigned int nhashmask = xfrm_new_hash_mask(hmask);
	unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head);
	struct hlist_head *oidx = net->xfrm.policy_byidx;
	struct hlist_head *nidx = xfrm_hash_alloc(nsize);
	int i;

	if (!nidx)
		return;

	write_lock_bh(&net->xfrm.xfrm_policy_lock);

	for (i = hmask; i >= 0; i--)
		xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask);

	net->xfrm.policy_byidx = nidx;
	net->xfrm.policy_idx_hmask = nhashmask;

	write_unlock_bh(&net->xfrm.xfrm_policy_lock);

	xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head));
}

static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total)
{
	unsigned int cnt = net->xfrm.policy_count[dir];
	unsigned int hmask = net->xfrm.policy_bydst[dir].hmask;

	if (total)
		*total += cnt;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    cnt > hmask)
		return 1;

	return 0;
}

static inline int xfrm_byidx_should_resize(struct net *net, int total)
{
	unsigned int hmask = net->xfrm.policy_idx_hmask;

	if ((hmask + 1) < xfrm_policy_hashmax &&
	    total > hmask)
		return 1;

	return 0;
}

void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si)
{
	read_lock_bh(&net->xfrm.xfrm_policy_lock);
	si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN];
	si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT];
	si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD];
	si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX];
	si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX];
	si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX];
	si->spdhcnt = net->xfrm.policy_idx_hmask;
	si->spdhmcnt = xfrm_policy_hashmax;
	read_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_spd_getinfo);

static DEFINE_MUTEX(hash_resize_mutex);
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.policy_hash_work);
	int dir, total;

	mutex_lock(&hash_resize_mutex);

	total = 0;
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		if (xfrm_bydst_should_resize(net, dir, &total))
			xfrm_bydst_resize(net, dir);
	}
	if (xfrm_byidx_should_resize(net, total))
		xfrm_byidx_resize(net, total);

	mutex_unlock(&hash_resize_mutex);
}
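/* Rebuild the bydst hash tables after the prefix-length thresholds
 * (policy_hthresh) change, re-inserting every policy in creation order
 * so that priority ordering within each chain is preserved.
 */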
static void xfrm_hash_rebuild(struct work_struct *work)
{
	struct net *net = container_of(work, struct net,
				       xfrm.policy_hthresh.work);
	unsigned int hmask;
	struct xfrm_policy *pol;
	struct xfrm_policy *policy;
	struct hlist_head *chain;
	struct hlist_head *odst;
	struct hlist_node *newpos;
	int i;
	int dir;
	unsigned seq;
	u8 lbits4, rbits4, lbits6, rbits6;

	mutex_lock(&hash_resize_mutex);

	/* read selector prefixlen thresholds */
	do {
		seq = read_seqbegin(&net->xfrm.policy_hthresh.lock);

		lbits4 = net->xfrm.policy_hthresh.lbits4;
		rbits4 = net->xfrm.policy_hthresh.rbits4;
		lbits6 = net->xfrm.policy_hthresh.lbits6;
		rbits6 = net->xfrm.policy_hthresh.rbits6;
	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq));

	write_lock_bh(&net->xfrm.xfrm_policy_lock);

	/* reset the bydst and inexact table in all directions */
	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);
		hmask = net->xfrm.policy_bydst[dir].hmask;
		odst = net->xfrm.policy_bydst[dir].table;
		for (i = hmask; i >= 0; i--)
			INIT_HLIST_HEAD(odst + i);
		if ((dir & XFRM_POLICY_MASK) == XFRM_POLICY_OUT) {
			/* dir out => dst = remote, src = local */
			net->xfrm.policy_bydst[dir].dbits4 = rbits4;
			net->xfrm.policy_bydst[dir].sbits4 = lbits4;
			net->xfrm.policy_bydst[dir].dbits6 = rbits6;
			net->xfrm.policy_bydst[dir].sbits6 = lbits6;
		} else {
			/* dir in/fwd => dst = local, src = remote */
			net->xfrm.policy_bydst[dir].dbits4 = lbits4;
			net->xfrm.policy_bydst[dir].sbits4 = rbits4;
			net->xfrm.policy_bydst[dir].dbits6 = lbits6;
			net->xfrm.policy_bydst[dir].sbits6 = rbits6;
		}
	}

	/* re-insert all policies by order of creation */
	list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) {
		newpos = NULL;
		chain = policy_hash_bysel(net, &policy->selector,
					  policy->family,
					  xfrm_policy_id2dir(policy->index));
		hlist_for_each_entry(pol, chain, bydst) {
			if (policy->priority >= pol->priority)
				newpos = &pol->bydst;
			else
				break;
		}
		if (newpos)
			hlist_add_behind(&policy->bydst, newpos);
		else
			hlist_add_head(&policy->bydst, chain);
	}

	write_unlock_bh(&net->xfrm.xfrm_policy_lock);

	mutex_unlock(&hash_resize_mutex);
}

void xfrm_policy_hash_rebuild(struct net *net)
{
	schedule_work(&net->xfrm.policy_hthresh.work);
}
EXPORT_SYMBOL(xfrm_policy_hash_rebuild);
/* Generate a new index... KAME seems to generate them ordered by cost,
 * at the price of an absolutely unpredictable ordering of rules.
 * That will not pass here.
 */
static u32 xfrm_gen_index(struct net *net, int dir, u32 index)
{
	static u32 idx_generator;

	for (;;) {
		struct hlist_head *list;
		struct xfrm_policy *p;
		u32 idx;
		int found;

		if (!index) {
			idx = (idx_generator | dir);
			idx_generator += 8;
		} else {
			idx = index;
			index = 0;
		}

		if (idx == 0)
			idx = 8;
		list = net->xfrm.policy_byidx + idx_hash(net, idx);
		found = 0;
		hlist_for_each_entry(p, list, byidx) {
			if (p->index == idx) {
				found = 1;
				break;
			}
		}
		if (!found)
			return idx;
	}
}

static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2)
{
	u32 *p1 = (u32 *) s1;
	u32 *p2 = (u32 *) s2;
	int len = sizeof(struct xfrm_selector) / sizeof(u32);
	int i;

	for (i = 0; i < len; i++) {
		if (p1[i] != p2[i])
			return 1;
	}

	return 0;
}

static void xfrm_policy_requeue(struct xfrm_policy *old,
				struct xfrm_policy *new)
{
	struct xfrm_policy_queue *pq = &old->polq;
	struct sk_buff_head list;

	if (skb_queue_empty(&pq->hold_queue))
		return;

	__skb_queue_head_init(&list);

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice_init(&pq->hold_queue, &list);
	if (del_timer(&pq->hold_timer))
		xfrm_pol_put(old);
	spin_unlock_bh(&pq->hold_queue.lock);

	pq = &new->polq;

	spin_lock_bh(&pq->hold_queue.lock);
	skb_queue_splice(&list, &pq->hold_queue);
	pq->timeout = XFRM_QUEUE_TMO_MIN;
	if (!mod_timer(&pq->hold_timer, jiffies))
		xfrm_pol_hold(new);
	spin_unlock_bh(&pq->hold_queue.lock);
}

static bool xfrm_policy_mark_match(struct xfrm_policy *policy,
				   struct xfrm_policy *pol)
{
	u32 mark = policy->mark.v & policy->mark.m;

	if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m)
		return true;

	if ((mark & pol->mark.m) == pol->mark.v &&
	    policy->priority == pol->priority)
		return true;

	return false;
}
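/* Insert a policy into the SPD. A policy with the same selector, mark
 * and security context replaces the old one (unless excl is set, which
 * fails with -EEXIST); packets queued on the replaced policy are moved
 * over via xfrm_policy_requeue().
 */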
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
{
	struct net *net = xp_net(policy);
	struct xfrm_policy *pol;
	struct xfrm_policy *delpol;
	struct hlist_head *chain;
	struct hlist_node *newpos;

	write_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, &policy->selector, policy->family, dir);
	delpol = NULL;
	newpos = NULL;
	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == policy->type &&
		    !selector_cmp(&pol->selector, &policy->selector) &&
		    xfrm_policy_mark_match(policy, pol) &&
		    xfrm_sec_ctx_match(pol->security, policy->security) &&
		    !WARN_ON(delpol)) {
			if (excl) {
				write_unlock_bh(&net->xfrm.xfrm_policy_lock);
				return -EEXIST;
			}
			delpol = pol;
			if (policy->priority > pol->priority)
				continue;
		} else if (policy->priority >= pol->priority) {
			newpos = &pol->bydst;
			continue;
		}
		if (delpol)
			break;
	}
	if (newpos)
		hlist_add_behind(&policy->bydst, newpos);
	else
		hlist_add_head(&policy->bydst, chain);
	__xfrm_policy_link(policy, dir);
	atomic_inc(&net->xfrm.flow_cache_genid);

	/* After previous checking, family can either be AF_INET or AF_INET6 */
	if (policy->family == AF_INET)
		rt_genid_bump_ipv4(net);
	else
		rt_genid_bump_ipv6(net);

	if (delpol) {
		xfrm_policy_requeue(delpol, policy);
		__xfrm_policy_unlink(delpol, dir);
	}
	policy->index = delpol ? delpol->index :
		xfrm_gen_index(net, dir, policy->index);
	hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index));
	policy->curlft.add_time = get_seconds();
	policy->curlft.use_time = 0;
	if (!mod_timer(&policy->timer, jiffies + HZ))
		xfrm_pol_hold(policy);
	write_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (delpol)
		xfrm_policy_kill(delpol);
	else if (xfrm_bydst_should_resize(net, dir, NULL))
		schedule_work(&net->xfrm.policy_hash_work);

	return 0;
}
EXPORT_SYMBOL(xfrm_policy_insert);

struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
					  int dir, struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete,
					  int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;

	*err = 0;
	write_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_bysel(net, sel, sel->family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, chain, bydst) {
		if (pol->type == type &&
		    (mark & pol->mark.m) == pol->mark.v &&
		    !selector_cmp(sel, &pol->selector) &&
		    xfrm_sec_ctx_match(ctx, pol->security)) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					write_unlock_bh(&net->xfrm.xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_bysel_ctx);

struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
				     int dir, u32 id, int delete, int *err)
{
	struct xfrm_policy *pol, *ret;
	struct hlist_head *chain;

	*err = -ENOENT;
	if (xfrm_policy_id2dir(id) != dir)
		return NULL;

	*err = 0;
	write_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = net->xfrm.policy_byidx + idx_hash(net, id);
	ret = NULL;
	hlist_for_each_entry(pol, chain, byidx) {
		if (pol->type == type && pol->index == id &&
		    (mark & pol->mark.m) == pol->mark.v) {
			xfrm_pol_hold(pol);
			if (delete) {
				*err = security_xfrm_policy_delete(
								pol->security);
				if (*err) {
					write_unlock_bh(&net->xfrm.xfrm_policy_lock);
					return pol;
				}
				__xfrm_policy_unlink(pol, dir);
			}
			ret = pol;
			break;
		}
	}
	write_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (ret && delete)
		xfrm_policy_kill(ret);
	return ret;
}
EXPORT_SYMBOL(xfrm_policy_byid);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
	int dir, err = 0;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		int i;

		hlist_for_each_entry(pol,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			err = security_xfrm_policy_delete(pol->security);
			if (err) {
				xfrm_audit_policy_delete(pol, 0, task_valid);
				return err;
			}
		}
		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
			hlist_for_each_entry(pol,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				err = security_xfrm_policy_delete(
								pol->security);
				if (err) {
					xfrm_audit_policy_delete(pol, 0,
								 task_valid);
					return err;
				}
			}
		}
	}
	return err;
}
#else
static inline int
xfrm_policy_flush_secctx_check(struct net *net, u8 type, bool task_valid)
{
	return 0;
}
#endif

int xfrm_policy_flush(struct net *net, u8 type, bool task_valid)
{
	int dir, err = 0, cnt = 0;

	write_lock_bh(&net->xfrm.xfrm_policy_lock);

	err = xfrm_policy_flush_secctx_check(net, type, task_valid);
	if (err)
		goto out;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy *pol;
		int i;

	again1:
		hlist_for_each_entry(pol,
				     &net->xfrm.policy_inexact[dir], bydst) {
			if (pol->type != type)
				continue;
			__xfrm_policy_unlink(pol, dir);
			write_unlock_bh(&net->xfrm.xfrm_policy_lock);
			cnt++;

			xfrm_audit_policy_delete(pol, 1, task_valid);

			xfrm_policy_kill(pol);

			write_lock_bh(&net->xfrm.xfrm_policy_lock);
			goto again1;
		}

		for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) {
	again2:
			hlist_for_each_entry(pol,
					     net->xfrm.policy_bydst[dir].table + i,
					     bydst) {
				if (pol->type != type)
					continue;
				__xfrm_policy_unlink(pol, dir);
				write_unlock_bh(&net->xfrm.xfrm_policy_lock);
				cnt++;

				xfrm_audit_policy_delete(pol, 1, task_valid);
				xfrm_policy_kill(pol);

				write_lock_bh(&net->xfrm.xfrm_policy_lock);
				goto again2;
			}
		}

	}
	if (!cnt)
		err = -ESRCH;
out:
	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_flush);

int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
		     int (*func)(struct xfrm_policy *, int, int, void*),
		     void *data)
{
	struct xfrm_policy *pol;
	struct xfrm_policy_walk_entry *x;
	int error = 0;

	if (walk->type >= XFRM_POLICY_TYPE_MAX &&
	    walk->type != XFRM_POLICY_TYPE_ANY)
		return -EINVAL;

	if (list_empty(&walk->walk.all) && walk->seq != 0)
		return 0;

	write_lock_bh(&net->xfrm.xfrm_policy_lock);
	if (list_empty(&walk->walk.all))
		x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all);
	else
		x = list_first_entry(&walk->walk.all,
				     struct xfrm_policy_walk_entry, all);

	list_for_each_entry_from(x, &net->xfrm.policy_all, all) {
		if (x->dead)
			continue;
		pol = container_of(x, struct xfrm_policy, walk);
		if (walk->type != XFRM_POLICY_TYPE_ANY &&
		    walk->type != pol->type)
			continue;
		error = func(pol, xfrm_policy_id2dir(pol->index),
			     walk->seq, data);
		if (error) {
			list_move_tail(&walk->walk.all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		error = -ENOENT;
		goto out;
	}
	list_del_init(&walk->walk.all);
out:
	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return error;
}
EXPORT_SYMBOL(xfrm_policy_walk);

void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type)
{
	INIT_LIST_HEAD(&walk->walk.all);
	walk->walk.dead = 1;
	walk->type = type;
	walk->seq = 0;
}
EXPORT_SYMBOL(xfrm_policy_walk_init);

void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net)
{
	if (list_empty(&walk->walk.all))
		return;

	write_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME where is net? */
	list_del(&walk->walk.all);
	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
}
EXPORT_SYMBOL(xfrm_policy_walk_done);
/*
 * Find policy to apply to this flow.
 *
 * Returns 0 if policy found, else an -errno.
 */
static int xfrm_policy_match(const struct xfrm_policy *pol,
			     const struct flowi *fl,
			     u8 type, u16 family, int dir)
{
	const struct xfrm_selector *sel = &pol->selector;
	int ret = -ESRCH;
	bool match;

	if (pol->family != family ||
	    (fl->flowi_mark & pol->mark.m) != pol->mark.v ||
	    pol->type != type)
		return ret;

	match = xfrm_selector_match(sel, fl, family);
	if (match)
		ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid,
						  dir);

	return ret;
}

static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type,
						     const struct flowi *fl,
						     u16 family, u8 dir)
{
	int err;
	struct xfrm_policy *pol, *ret;
	const xfrm_address_t *daddr, *saddr;
	struct hlist_head *chain;
	u32 priority = ~0U;

	daddr = xfrm_flowi_daddr(fl, family);
	saddr = xfrm_flowi_saddr(fl, family);
	if (unlikely(!daddr || !saddr))
		return NULL;

	read_lock_bh(&net->xfrm.xfrm_policy_lock);
	chain = policy_hash_direct(net, daddr, saddr, family, dir);
	ret = NULL;
	hlist_for_each_entry(pol, chain, bydst) {
		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, chain, bydst) {
		if ((pol->priority >= priority) && ret)
			break;

		err = xfrm_policy_match(pol, fl, type, family, dir);
		if (err) {
			if (err == -ESRCH)
				continue;
			else {
				ret = ERR_PTR(err);
				goto fail;
			}
		} else {
			ret = pol;
			break;
		}
	}

	xfrm_pol_hold(ret);
fail:
	read_unlock_bh(&net->xfrm.xfrm_policy_lock);

	return ret;
}

static struct xfrm_policy *
__xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_policy *pol;

	pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
	if (pol != NULL)
		return pol;
#endif
	return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
}

static int flow_to_policy_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;

	switch (dir) {
	default:
	case FLOW_DIR_IN:
		return XFRM_POLICY_IN;
	case FLOW_DIR_OUT:
		return XFRM_POLICY_OUT;
	case FLOW_DIR_FWD:
		return XFRM_POLICY_FWD;
	}
}

static struct flow_cache_object *
xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family,
		   u8 dir, struct flow_cache_object *old_obj, void *ctx)
{
	struct xfrm_policy *pol;

	if (old_obj)
		xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));

	pol = __xfrm_policy_lookup(net, fl, family, flow_to_policy_dir(dir));
	if (IS_ERR_OR_NULL(pol))
		return ERR_CAST(pol);

	/* Resolver returns two references:
	 * one for cache and one for caller of flow_cache_lookup() */
	xfrm_pol_hold(pol);

	return &pol->flo;
}
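/* Inverse of flow_to_policy_dir(); both short-circuit when the
 * XFRM_POLICY_* and FLOW_DIR_* constants already coincide.
 */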
static inline int policy_to_flow_dir(int dir)
{
	if (XFRM_POLICY_IN == FLOW_DIR_IN &&
	    XFRM_POLICY_OUT == FLOW_DIR_OUT &&
	    XFRM_POLICY_FWD == FLOW_DIR_FWD)
		return dir;
	switch (dir) {
	default:
	case XFRM_POLICY_IN:
		return FLOW_DIR_IN;
	case XFRM_POLICY_OUT:
		return FLOW_DIR_OUT;
	case XFRM_POLICY_FWD:
		return FLOW_DIR_FWD;
	}
}

static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir,
						 const struct flowi *fl)
{
	struct xfrm_policy *pol;
	struct net *net = sock_net(sk);

	read_lock_bh(&net->xfrm.xfrm_policy_lock);
	if ((pol = sk->sk_policy[dir]) != NULL) {
		bool match = xfrm_selector_match(&pol->selector, fl,
						 sk->sk_family);
		int err = 0;

		if (match) {
			if ((sk->sk_mark & pol->mark.m) != pol->mark.v) {
				pol = NULL;
				goto out;
			}
			err = security_xfrm_policy_lookup(pol->security,
						      fl->flowi_secid,
						      policy_to_flow_dir(dir));
			if (!err)
				xfrm_pol_hold(pol);
			else if (err == -ESRCH)
				pol = NULL;
			else
				pol = ERR_PTR(err);
		} else
			pol = NULL;
	}
out:
	read_unlock_bh(&net->xfrm.xfrm_policy_lock);
	return pol;
}

static void __xfrm_policy_link(struct xfrm_policy *pol, int dir)
{
	struct net *net = xp_net(pol);

	list_add(&pol->walk.all, &net->xfrm.policy_all);
	net->xfrm.policy_count[dir]++;
	xfrm_pol_hold(pol);
}

static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
						int dir)
{
	struct net *net = xp_net(pol);

	if (list_empty(&pol->walk.all))
		return NULL;

	/* Socket policies are not hashed. */
	if (!hlist_unhashed(&pol->bydst)) {
		hlist_del(&pol->bydst);
		hlist_del(&pol->byidx);
	}

	list_del_init(&pol->walk.all);
	net->xfrm.policy_count[dir]--;

	return pol;
}

static void xfrm_sk_policy_link(struct xfrm_policy *pol, int dir)
{
	__xfrm_policy_link(pol, XFRM_POLICY_MAX + dir);
}

static void xfrm_sk_policy_unlink(struct xfrm_policy *pol, int dir)
{
	__xfrm_policy_unlink(pol, XFRM_POLICY_MAX + dir);
}

int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
{
	struct net *net = xp_net(pol);

	write_lock_bh(&net->xfrm.xfrm_policy_lock);
	pol = __xfrm_policy_unlink(pol, dir);
	write_unlock_bh(&net->xfrm.xfrm_policy_lock);
	if (pol) {
		xfrm_policy_kill(pol);
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(xfrm_policy_delete);
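/* Attach a policy to a socket. Any previous policy for this direction
 * is replaced and killed; packets queued on it are migrated to the new
 * policy first.
 */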
int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
{
	struct net *net = xp_net(pol);
	struct xfrm_policy *old_pol;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pol && pol->type != XFRM_POLICY_TYPE_MAIN)
		return -EINVAL;
#endif

	write_lock_bh(&net->xfrm.xfrm_policy_lock);
	old_pol = sk->sk_policy[dir];
	sk->sk_policy[dir] = pol;
	if (pol) {
		pol->curlft.add_time = get_seconds();
		pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir, 0);
		xfrm_sk_policy_link(pol, dir);
	}
	if (old_pol) {
		if (pol)
			xfrm_policy_requeue(old_pol, pol);

		/* Unlinking always succeeds. This is the only function
		 * allowed to delete or replace a socket policy.
		 */
		xfrm_sk_policy_unlink(old_pol, dir);
	}
	write_unlock_bh(&net->xfrm.xfrm_policy_lock);

	if (old_pol) {
		xfrm_policy_kill(old_pol);
	}
	return 0;
}

static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
{
	struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC);
	struct net *net = xp_net(old);

	if (newp) {
		newp->selector = old->selector;
		if (security_xfrm_policy_clone(old->security,
					       &newp->security)) {
			kfree(newp);
			return NULL;	/* ENOMEM */
		}
		newp->lft = old->lft;
		newp->curlft = old->curlft;
		newp->mark = old->mark;
		newp->action = old->action;
		newp->flags = old->flags;
		newp->xfrm_nr = old->xfrm_nr;
		newp->index = old->index;
		newp->type = old->type;
		memcpy(newp->xfrm_vec, old->xfrm_vec,
		       newp->xfrm_nr*sizeof(struct xfrm_tmpl));
		write_lock_bh(&net->xfrm.xfrm_policy_lock);
		xfrm_sk_policy_link(newp, dir);
		write_unlock_bh(&net->xfrm.xfrm_policy_lock);
		xfrm_pol_put(newp);
	}
	return newp;
}

int __xfrm_sk_clone_policy(struct sock *sk)
{
	struct xfrm_policy *p0 = sk->sk_policy[0],
			   *p1 = sk->sk_policy[1];

	sk->sk_policy[0] = sk->sk_policy[1] = NULL;
	if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL)
		return -ENOMEM;
	if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL)
		return -ENOMEM;
	return 0;
}

static int
xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote,
	       unsigned short family)
{
	int err;
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return -EINVAL;
	err = afinfo->get_saddr(net, local, remote);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}

/* Resolve list of templates for the flow, given policy. */
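/* For each template: tunnel and BEET modes take the addresses from the
 * template itself (deriving a local address via xfrm_get_saddr() when
 * the template leaves it unspecified), then a matching state is looked
 * up. Optional templates may be skipped; a missing mandatory state
 * fails with -EAGAIN.
 */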
static int
xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
		      struct xfrm_state **xfrm, unsigned short family)
{
	struct net *net = xp_net(policy);
	int nx;
	int i, error;
	xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family);
	xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family);
	xfrm_address_t tmp;

	for (nx = 0, i = 0; i < policy->xfrm_nr; i++) {
		struct xfrm_state *x;
		xfrm_address_t *remote = daddr;
		xfrm_address_t *local  = saddr;
		struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i];

		if (tmpl->mode == XFRM_MODE_TUNNEL ||
		    tmpl->mode == XFRM_MODE_BEET) {
			remote = &tmpl->id.daddr;
			local = &tmpl->saddr;
			if (xfrm_addr_any(local, tmpl->encap_family)) {
				error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family);
				if (error)
					goto fail;
				local = &tmp;
			}
		}

		x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family);

		if (x && x->km.state == XFRM_STATE_VALID) {
			xfrm[nx++] = x;
			daddr = remote;
			saddr = local;
			continue;
		}
		if (x) {
			error = (x->km.state == XFRM_STATE_ERROR ?
				 -EINVAL : -EAGAIN);
			xfrm_state_put(x);
		} else if (error == -ESRCH) {
			error = -EAGAIN;
		}

		if (!tmpl->optional)
			goto fail;
	}
	return nx;

fail:
	for (nx--; nx >= 0; nx--)
		xfrm_state_put(xfrm[nx]);
	return error;
}

static int
xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl,
		  struct xfrm_state **xfrm, unsigned short family)
{
	struct xfrm_state *tp[XFRM_MAX_DEPTH];
	struct xfrm_state **tpp = (npols > 1) ? tp : xfrm;
	int cnx = 0;
	int error;
	int ret;
	int i;

	for (i = 0; i < npols; i++) {
		if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) {
			error = -ENOBUFS;
			goto fail;
		}

		ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family);
		if (ret < 0) {
			error = ret;
			goto fail;
		} else
			cnx += ret;
	}

	/* found states are sorted for outbound processing */
	if (npols > 1)
		xfrm_state_sort(xfrm, tpp, cnx, family);

	return cnx;

fail:
	for (cnx--; cnx >= 0; cnx--)
		xfrm_state_put(tpp[cnx]);
	return error;

}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static inline int xfrm_get_tos(const struct flowi *fl, int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int tos;

	if (!afinfo)
		return -EINVAL;

	tos = afinfo->get_tos(fl);

	xfrm_policy_put_afinfo(afinfo);

	return tos;
}
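/* Flow cache .get for bundles: return NULL to force re-resolution when
 * the entry is a dummy bundle that still carries templates, a queueing
 * (DST_XFRM_QUEUE) dst, or a stale real bundle; otherwise take a
 * reference and reuse it.
 */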
static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
{
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	if (xdst->route == NULL) {
		/* Dummy bundle - if it has xfrms we were not
		 * able to build the bundle as template resolution
		 * failed. It means we need to try resolving again.
		 */
		if (xdst->num_xfrms > 0)
			return NULL;
	} else if (dst->flags & DST_XFRM_QUEUE) {
		return NULL;
	} else {
		/* Real bundle */
		if (stale_bundle(dst))
			return NULL;
	}

	dst_hold(dst);
	return flo;
}

static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
{
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	if (!xdst->route)
		return 0;
	if (stale_bundle(dst))
		return 0;

	return 1;
}

static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
{
	struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
	struct dst_entry *dst = &xdst->u.dst;

	dst_free(dst);
}

static const struct flow_cache_ops xfrm_bundle_fc_ops = {
	.get = xfrm_bundle_flo_get,
	.check = xfrm_bundle_flo_check,
	.delete = xfrm_bundle_flo_delete,
};

static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_ops *dst_ops;
	struct xfrm_dst *xdst;

	if (!afinfo)
		return ERR_PTR(-EINVAL);

	switch (family) {
	case AF_INET:
		dst_ops = &net->xfrm.xfrm4_dst_ops;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		dst_ops = &net->xfrm.xfrm6_dst_ops;
		break;
#endif
	default:
		BUG();
	}
	xdst = dst_alloc(dst_ops, NULL, 0, DST_OBSOLETE_NONE, 0);

	if (likely(xdst)) {
		struct dst_entry *dst = &xdst->u.dst;

		memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst));
		xdst->flo.ops = &xfrm_bundle_fc_ops;
		if (afinfo->init_dst)
			afinfo->init_dst(net, xdst);
	} else
		xdst = ERR_PTR(-ENOBUFS);

	xfrm_policy_put_afinfo(afinfo);

	return xdst;
}

static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
				 int nfheader_len)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(dst->ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->init_path(path, dst, nfheader_len);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}

static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
				const struct flowi *fl)
{
	struct xfrm_policy_afinfo *afinfo =
		xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
	int err;

	if (!afinfo)
		return -EINVAL;

	err = afinfo->fill_dst(xdst, dev, fl);

	xfrm_policy_put_afinfo(afinfo);

	return err;
}


/* Allocate chain of dst_entry's, attach known xfrm's, calculate
 * all the metrics... Shortly, bundle a bundle.
 */
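/* dst0 heads the resulting chain; each link carries one xfrm state and
 * xdst->route keeps the route the transform rides on. On failure the
 * remaining state references are dropped and an ERR_PTR() is returned.
 */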
static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
					    struct xfrm_state **xfrm, int nx,
					    const struct flowi *fl,
					    struct dst_entry *dst)
{
	struct net *net = xp_net(policy);
	unsigned long now = jiffies;
	struct net_device *dev;
	struct xfrm_mode *inner_mode;
	struct dst_entry *dst_prev = NULL;
	struct dst_entry *dst0 = NULL;
	int i = 0;
	int err;
	int header_len = 0;
	int nfheader_len = 0;
	int trailer_len = 0;
	int tos;
	int family = policy->selector.family;
	xfrm_address_t saddr, daddr;

	xfrm_flowi_addr_get(fl, &saddr, &daddr, family);

	tos = xfrm_get_tos(fl, family);
	err = tos;
	if (tos < 0)
		goto put_states;

	dst_hold(dst);

	for (; i < nx; i++) {
		struct xfrm_dst *xdst = xfrm_alloc_dst(net, family);
		struct dst_entry *dst1 = &xdst->u.dst;

		err = PTR_ERR(xdst);
		if (IS_ERR(xdst)) {
			dst_release(dst);
			goto put_states;
		}

		if (xfrm[i]->sel.family == AF_UNSPEC) {
			inner_mode = xfrm_ip2inner_mode(xfrm[i],
							xfrm_af2proto(family));
			if (!inner_mode) {
				err = -EAFNOSUPPORT;
				dst_release(dst);
				goto put_states;
			}
		} else
			inner_mode = xfrm[i]->inner_mode;

		if (!dst_prev)
			dst0 = dst1;
		else {
			dst_prev->child = dst_clone(dst1);
			dst1->flags |= DST_NOHASH;
		}

		xdst->route = dst;
		dst_copy_metrics(dst1, dst);

		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
			family = xfrm[i]->props.family;
			dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr,
					      family);
			err = PTR_ERR(dst);
			if (IS_ERR(dst))
				goto put_states;
		} else
			dst_hold(dst);

		dst1->xfrm = xfrm[i];
		xdst->xfrm_genid = xfrm[i]->genid;

		dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
		dst1->flags |= DST_HOST;
		dst1->lastuse = now;

		dst1->input = dst_discard;
		dst1->output = inner_mode->afinfo->output;

		dst1->next = dst_prev;
		dst_prev = dst1;

		header_len += xfrm[i]->props.header_len;
		if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT)
			nfheader_len += xfrm[i]->props.header_len;
		trailer_len += xfrm[i]->props.trailer_len;
	}

	dst_prev->child = dst;
	dst0->path = dst;

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
	xfrm_init_pmtu(dst_prev);

	for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;

		err = xfrm_fill_dst(xdst, dev, fl);
		if (err)
			goto free_dst;

		dst_prev->header_len = header_len;
		dst_prev->trailer_len = trailer_len;
		header_len -= xdst->u.dst.xfrm->props.header_len;
		trailer_len -= xdst->u.dst.xfrm->props.trailer_len;
	}

out:
	return dst0;

put_states:
	for (; i < nx; i++)
		xfrm_state_put(xfrm[i]);
free_dst:
	if (dst0)
		dst_free(dst0);
	dst0 = ERR_PTR(err);
	goto out;
}

#ifdef CONFIG_XFRM_SUB_POLICY
static int xfrm_dst_alloc_copy(void **target, const void *src, int size)
{
	if (!*target) {
		*target = kmalloc(size, GFP_ATOMIC);
		if (!*target)
			return -ENOMEM;
	}

	memcpy(*target, src, size);
	return 0;
}
#endif
static int xfrm_dst_update_parent(struct dst_entry *dst,
				  const struct xfrm_selector *sel)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
				   sel, sizeof(*sel));
#else
	return 0;
#endif
}

static int xfrm_dst_update_origin(struct dst_entry *dst,
				  const struct flowi *fl)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
#else
	return 0;
#endif
}

static int xfrm_expand_policies(const struct flowi *fl, u16 family,
				struct xfrm_policy **pols,
				int *num_pols, int *num_xfrms)
{
	int i;

	if (*num_pols == 0 || !pols[0]) {
		*num_pols = 0;
		*num_xfrms = 0;
		return 0;
	}
	if (IS_ERR(pols[0]))
		return PTR_ERR(pols[0]);

	*num_xfrms = pols[0]->xfrm_nr;

#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
	    pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
						    XFRM_POLICY_TYPE_MAIN,
						    fl, family,
						    XFRM_POLICY_OUT);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				xfrm_pols_put(pols, *num_pols);
				return PTR_ERR(pols[1]);
			}
			(*num_pols)++;
			(*num_xfrms) += pols[1]->xfrm_nr;
		}
	}
#endif
	for (i = 0; i < *num_pols; i++) {
		if (pols[i]->action != XFRM_POLICY_ALLOW) {
			*num_xfrms = -1;
			break;
		}
	}

	return 0;

}

static struct xfrm_dst *
xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
			       const struct flowi *fl, u16 family,
			       struct dst_entry *dst_orig)
{
	struct net *net = xp_net(pols[0]);
	struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
	struct dst_entry *dst;
	struct xfrm_dst *xdst;
	int err;

	/* Try to instantiate a bundle */
	err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
	if (err <= 0) {
		if (err != 0 && err != -EAGAIN)
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
		return ERR_PTR(err);
	}

	dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
	if (IS_ERR(dst)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
		return ERR_CAST(dst);
	}

	xdst = (struct xfrm_dst *)dst;
	xdst->num_xfrms = err;
	if (num_pols > 1)
		err = xfrm_dst_update_parent(dst, &pols[1]->selector);
	else
		err = xfrm_dst_update_origin(dst, fl);
	if (unlikely(err)) {
		dst_free(dst);
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
		return ERR_PTR(err);
	}

	xdst->num_pols = num_pols;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
	xdst->policy_genid = atomic_read(&pols[0]->genid);

	return xdst;
}
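/* Hold-queue timer handler: retry the lookup for the head of the queue
 * and, once a real (non-DST_XFRM_QUEUE) route exists, flush all queued
 * packets through dst_output(). Otherwise back off exponentially, and
 * purge the queue once the timeout reaches XFRM_QUEUE_TMO_MAX.
 */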
static void xfrm_policy_queue_process(unsigned long arg)
{
	struct sk_buff *skb;
	struct sock *sk;
	struct dst_entry *dst;
	struct xfrm_policy *pol = (struct xfrm_policy *)arg;
	struct xfrm_policy_queue *pq = &pol->polq;
	struct flowi fl;
	struct sk_buff_head list;

	spin_lock(&pq->hold_queue.lock);
	skb = skb_peek(&pq->hold_queue);
	if (!skb) {
		spin_unlock(&pq->hold_queue.lock);
		goto out;
	}
	dst = skb_dst(skb);
	sk = skb->sk;
	xfrm_decode_session(skb, &fl, dst->ops->family);
	spin_unlock(&pq->hold_queue.lock);

	dst_hold(dst->path);
	dst = xfrm_lookup(xp_net(pol), dst->path, &fl, sk, 0);
	if (IS_ERR(dst))
		goto purge_queue;

	if (dst->flags & DST_XFRM_QUEUE) {
		dst_release(dst);

		if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
			goto purge_queue;

		pq->timeout = pq->timeout << 1;
		if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
			xfrm_pol_hold(pol);
		goto out;
	}

	dst_release(dst);

	__skb_queue_head_init(&list);

	spin_lock(&pq->hold_queue.lock);
	pq->timeout = 0;
	skb_queue_splice_init(&pq->hold_queue, &list);
	spin_unlock(&pq->hold_queue.lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);

		xfrm_decode_session(skb, &fl, skb_dst(skb)->ops->family);
		dst_hold(skb_dst(skb)->path);
		dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path,
				  &fl, skb->sk, 0);
		if (IS_ERR(dst)) {
			kfree_skb(skb);
			continue;
		}

		nf_reset(skb);
		skb_dst_drop(skb);
		skb_dst_set(skb, dst);

		dst_output(skb);
	}

out:
	xfrm_pol_put(pol);
	return;

purge_queue:
	pq->timeout = 0;
	skb_queue_purge(&pq->hold_queue);
	xfrm_pol_put(pol);
}

static int xdst_queue_output(struct sock *sk, struct sk_buff *skb)
{
	unsigned long sched_next;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
	struct xfrm_policy *pol = xdst->pols[0];
	struct xfrm_policy_queue *pq = &pol->polq;

	if (unlikely(skb_fclone_busy(sk, skb))) {
		kfree_skb(skb);
		return 0;
	}

	if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
		kfree_skb(skb);
		return -EAGAIN;
	}

	skb_dst_force(skb);

	spin_lock_bh(&pq->hold_queue.lock);

	if (!pq->timeout)
		pq->timeout = XFRM_QUEUE_TMO_MIN;

	sched_next = jiffies + pq->timeout;

	if (del_timer(&pq->hold_timer)) {
		if (time_before(pq->hold_timer.expires, sched_next))
			sched_next = pq->hold_timer.expires;
		xfrm_pol_put(pol);
	}

	__skb_queue_tail(&pq->hold_queue, skb);
	if (!mod_timer(&pq->hold_timer, sched_next))
		xfrm_pol_hold(pol);

	spin_unlock_bh(&pq->hold_queue.lock);

	return 0;
}
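/* Build a dummy bundle for the case where policies matched but the
 * needed states do not exist yet. With XFRM_LOOKUP_QUEUE set and
 * larval drop disabled, the dummy dst queues packets through
 * xdst_queue_output() until the states are negotiated.
 */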
static struct xfrm_dst *xfrm_create_dummy_bundle(struct net *net,
						 struct xfrm_flo *xflo,
						 const struct flowi *fl,
						 int num_xfrms,
						 u16 family)
{
	int err;
	struct net_device *dev;
	struct dst_entry *dst;
	struct dst_entry *dst1;
	struct xfrm_dst *xdst;

	xdst = xfrm_alloc_dst(net, family);
	if (IS_ERR(xdst))
		return xdst;

	if (!(xflo->flags & XFRM_LOOKUP_QUEUE) ||
	    net->xfrm.sysctl_larval_drop ||
	    num_xfrms <= 0)
		return xdst;

	dst = xflo->dst_orig;
	dst1 = &xdst->u.dst;
	dst_hold(dst);
	xdst->route = dst;

	dst_copy_metrics(dst1, dst);

	dst1->obsolete = DST_OBSOLETE_FORCE_CHK;
	dst1->flags |= DST_HOST | DST_XFRM_QUEUE;
	dst1->lastuse = jiffies;

	dst1->input = dst_discard;
	dst1->output = xdst_queue_output;

	dst_hold(dst);
	dst1->child = dst;
	dst1->path = dst;

	xfrm_init_path((struct xfrm_dst *)dst1, dst, 0);

	err = -ENODEV;
	dev = dst->dev;
	if (!dev)
		goto free_dst;

	err = xfrm_fill_dst(xdst, dev, fl);
	if (err)
		goto free_dst;

out:
	return xdst;

free_dst:
	dst_release(dst1);
	xdst = ERR_PTR(err);
	goto out;
}

static struct flow_cache_object *
xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir,
		   struct flow_cache_object *oldflo, void *ctx)
{
	struct xfrm_flo *xflo = (struct xfrm_flo *)ctx;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	struct xfrm_dst *xdst, *new_xdst;
	int num_pols = 0, num_xfrms = 0, i, err, pol_dead;

	/* Check if the policies from old bundle are usable */
	xdst = NULL;
	if (oldflo) {
		xdst = container_of(oldflo, struct xfrm_dst, flo);
		num_pols = xdst->num_pols;
		num_xfrms = xdst->num_xfrms;
		pol_dead = 0;
		for (i = 0; i < num_pols; i++) {
			pols[i] = xdst->pols[i];
			pol_dead |= pols[i]->walk.dead;
		}
		if (pol_dead) {
			dst_free(&xdst->u.dst);
			xdst = NULL;
			num_pols = 0;
			num_xfrms = 0;
			oldflo = NULL;
		}
	}

	/* Resolve policies to use if we couldn't get them from
	 * previous cache entry */
	if (xdst == NULL) {
		num_pols = 1;
		pols[0] = __xfrm_policy_lookup(net, fl, family,
					       flow_to_policy_dir(dir));
		err = xfrm_expand_policies(fl, family, pols,
					   &num_pols, &num_xfrms);
		if (err < 0)
			goto inc_error;
		if (num_pols == 0)
			return NULL;
		if (num_xfrms <= 0)
			goto make_dummy_bundle;
	}

	new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family,
						  xflo->dst_orig);
	if (IS_ERR(new_xdst)) {
		err = PTR_ERR(new_xdst);
		if (err != -EAGAIN)
			goto error;
		if (oldflo == NULL)
			goto make_dummy_bundle;
		dst_hold(&xdst->u.dst);
		return oldflo;
	} else if (new_xdst == NULL) {
		num_xfrms = 0;
		if (oldflo == NULL)
			goto make_dummy_bundle;
		xdst->num_xfrms = 0;
		dst_hold(&xdst->u.dst);
		return oldflo;
	}

	/* Kill the previous bundle */
	if (xdst) {
		/* The policies were stolen for newly generated bundle */
		xdst->num_pols = 0;
		dst_free(&xdst->u.dst);
	}

	/* Flow cache does not have reference, it dst_free()'s,
	 * but we do need to return one reference for original caller */
	dst_hold(&new_xdst->u.dst);
	return &new_xdst->flo;

make_dummy_bundle:
	/* We found policies, but there's no bundle to instantiate:
	 * either because the policy blocks, has no transformations or
	 * we could not build a template (no xfrm_states).
	 */
	xdst = xfrm_create_dummy_bundle(net, xflo, fl, num_xfrms, family);
	if (IS_ERR(xdst)) {
		xfrm_pols_put(pols, num_pols);
		return ERR_CAST(xdst);
	}
	xdst->num_pols = num_pols;
	xdst->num_xfrms = num_xfrms;
	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);

	dst_hold(&xdst->u.dst);
	return &xdst->flo;

inc_error:
	XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
error:
	if (xdst != NULL)
		dst_free(&xdst->u.dst);
	else
		xfrm_pols_put(pols, num_pols);
	return ERR_PTR(err);
}

static struct dst_entry *make_blackhole(struct net *net, u16 family,
					struct dst_entry *dst_orig)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	struct dst_entry *ret;

	if (!afinfo) {
		dst_release(dst_orig);
		return ERR_PTR(-EINVAL);
	} else {
		ret = afinfo->blackhole_route(net, dst_orig);
	}
	xfrm_policy_put_afinfo(afinfo);

	return ret;
}
/* Main function: finds/creates a bundle for a given flow.
 *
 * At the moment we eat a raw IP route.  Mostly to speed up lookups
 * on interfaces with disabled IPsec.
 */
struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
			      const struct flowi *fl,
			      struct sock *sk, int flags)
{
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	struct flow_cache_object *flo;
	struct xfrm_dst *xdst;
	struct dst_entry *dst, *route;
	u16 family = dst_orig->ops->family;
	u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
	int i, err, num_pols, num_xfrms = 0, drop_pols = 0;

	dst = NULL;
	xdst = NULL;
	route = NULL;

	if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
		num_pols = 1;
		pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
		err = xfrm_expand_policies(fl, family, pols,
					   &num_pols, &num_xfrms);
		if (err < 0)
			goto dropdst;

		if (num_pols) {
			if (num_xfrms <= 0) {
				drop_pols = num_pols;
				goto no_transform;
			}

			xdst = xfrm_resolve_and_create_bundle(
					pols, num_pols, fl,
					family, dst_orig);
			if (IS_ERR(xdst)) {
				xfrm_pols_put(pols, num_pols);
				err = PTR_ERR(xdst);
				goto dropdst;
			} else if (xdst == NULL) {
				num_xfrms = 0;
				drop_pols = num_pols;
				goto no_transform;
			}

			dst_hold(&xdst->u.dst);
			xdst->u.dst.flags |= DST_NOCACHE;
			route = xdst->route;
		}
	}

	if (xdst == NULL) {
		struct xfrm_flo xflo;

		xflo.dst_orig = dst_orig;
		xflo.flags = flags;

		/* To accelerate a bit... */
		if ((dst_orig->flags & DST_NOXFRM) ||
		    !net->xfrm.policy_count[XFRM_POLICY_OUT])
			goto nopol;

		flo = flow_cache_lookup(net, fl, family, dir,
					xfrm_bundle_lookup, &xflo);
		if (flo == NULL)
			goto nopol;
		if (IS_ERR(flo)) {
			err = PTR_ERR(flo);
			goto dropdst;
		}
		xdst = container_of(flo, struct xfrm_dst, flo);

		num_pols = xdst->num_pols;
		num_xfrms = xdst->num_xfrms;
		memcpy(pols, xdst->pols, sizeof(struct xfrm_policy *) * num_pols);
		route = xdst->route;
	}

	dst = &xdst->u.dst;
	if (route == NULL && num_xfrms > 0) {
		/* The only case when xfrm_bundle_lookup() returns a
		 * bundle with a null route is when the template could
		 * not be resolved.  It means policies are there, but the
		 * bundle could not be created, since we don't yet
		 * have the xfrm_states.  We need to wait for the KM to
		 * negotiate new SAs or bail out with an error. */
		if (net->xfrm.sysctl_larval_drop) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
			err = -EREMOTE;
			goto error;
		}

		err = -EAGAIN;

		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
		goto error;
	}

no_transform:
	if (num_pols == 0)
		goto nopol;

	if ((flags & XFRM_LOOKUP_ICMP) &&
	    !(pols[0]->flags & XFRM_POLICY_ICMP)) {
		err = -ENOENT;
		goto error;
	}

	for (i = 0; i < num_pols; i++)
		pols[i]->curlft.use_time = get_seconds();

	if (num_xfrms < 0) {
		/* Prohibit the flow */
		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
		err = -EPERM;
		goto error;
	} else if (num_xfrms > 0) {
		/* Flow transformed */
		dst_release(dst_orig);
	} else {
		/* Flow passes untransformed */
		dst_release(dst);
		dst = dst_orig;
	}
ok:
	xfrm_pols_put(pols, drop_pols);
	if (dst && dst->xfrm &&
	    dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
		dst->flags |= DST_XFRM_TUNNEL;
	return dst;

nopol:
	if (!(flags & XFRM_LOOKUP_ICMP)) {
		dst = dst_orig;
		goto ok;
	}
	err = -ENOENT;
error:
	dst_release(dst);
dropdst:
	if (!(flags & XFRM_LOOKUP_KEEP_DST_REF))
		dst_release(dst_orig);
	xfrm_pols_put(pols, drop_pols);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(xfrm_lookup);
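/*
 * Illustrative sketch (not part of the original file): how an output path
 * typically uses xfrm_lookup().  The caller resolves a plain route first
 * and then lets xfrm_lookup() replace it with a bundle when policy says
 * so (the route helper named here is an assumed placeholder):
 *
 *	struct dst_entry *dst = plain_route_lookup(net, &fl);	// assumed
 *
 *	dst = xfrm_lookup(net, dst, &fl, sk, 0);
 *	if (IS_ERR(dst))
 *		return PTR_ERR(dst);	// dst_orig was already released
 *	// dst is now either the original route (flow passes untransformed)
 *	// or the head of a transformation bundle
 */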
/* Callers of xfrm_lookup_route() must ensure a call to dst_output().
 * Otherwise we may send out blackholed packets.
 */
struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
				    const struct flowi *fl,
				    struct sock *sk, int flags)
{
	struct dst_entry *dst = xfrm_lookup(net, dst_orig, fl, sk,
					    flags | XFRM_LOOKUP_QUEUE |
					    XFRM_LOOKUP_KEEP_DST_REF);

	if (IS_ERR(dst) && PTR_ERR(dst) == -EREMOTE)
		return make_blackhole(net, dst_orig->ops->family, dst_orig);

	return dst;
}
EXPORT_SYMBOL(xfrm_lookup_route);

static inline int
xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl)
{
	struct xfrm_state *x;

	if (!skb->sp || idx < 0 || idx >= skb->sp->len)
		return 0;
	x = skb->sp->xvec[idx];
	if (!x->type->reject)
		return 0;
	return x->type->reject(x, skb, fl);
}

/* When the skb is transformed back to its "native" form, we have to
 * check policy restrictions.  At the moment we do this in a maximally
 * stupid way.  Shame on me. :-)  Of course, connected sockets must
 * have the policy cached at them.
 */

static inline int
xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x,
	      unsigned short family)
{
	if (xfrm_state_kern(x))
		return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family);
	return	x->id.proto == tmpl->id.proto &&
		(x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
		(x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
		x->props.mode == tmpl->mode &&
		(tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
		 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
		!(x->props.mode != XFRM_MODE_TRANSPORT &&
		  xfrm_state_addr_cmp(tmpl, x, family));
}

/*
 * 0 or more than 0 is returned when validation succeeds (either a bypass
 * because of an optional transport-mode template, or the next index of the
 * matched secpath state with the template).
 * -1 is returned when no matching template is found.
 * Otherwise "-2 - errored_index" is returned.
 */
static inline int
xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start,
	       unsigned short family)
{
	int idx = start;

	if (tmpl->optional) {
		if (tmpl->mode == XFRM_MODE_TRANSPORT)
			return start;
	} else
		start = -1;
	for (; idx < sp->len; idx++) {
		if (xfrm_state_ok(tmpl, sp->xvec[idx], family))
			return ++idx;
		if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) {
			if (start == -1)
				start = -2-idx;
			break;
		}
	}
	return start;
}
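/*
 * Illustrative sketch (not part of the original file): decoding
 * xfrm_policy_ok()'s return value, mirroring the caller in
 * __xfrm_policy_check() below:
 *
 *	k = xfrm_policy_ok(tpp[i], sp, k, family);
 *	if (k < 0) {
 *		if (k < -1)
 *			xerr_idx = -(2 + k);	// "-2 - errored_index"
 *		goto reject;			// template mismatch
 *	}
 *	// k >= 0: matched (or optional bypass); continue from index k
 */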
int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
			  unsigned int family, int reverse)
{
	struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
	int err;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	afinfo->decode_session(skb, fl, reverse);
	err = security_xfrm_decode_session(skb, &fl->flowi_secid);
	xfrm_policy_put_afinfo(afinfo);
	return err;
}
EXPORT_SYMBOL(__xfrm_decode_session);

static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp)
{
	for (; k < sp->len; k++) {
		if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) {
			*idxp = k;
			return 1;
		}
	}

	return 0;
}
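/*
 * Illustrative sketch (not part of the original file): extracting a flow
 * key from an skb via xfrm_decode_session() (the non-reverse wrapper
 * around __xfrm_decode_session() in net/xfrm.h).  __xfrm_route_forward()
 * below follows exactly this pattern:
 *
 *	struct flowi fl;
 *
 *	if (xfrm_decode_session(skb, &fl, family) < 0)
 *		goto drop;
 *	// fl now carries addresses, ports and protocol for
 *	// selector matching and policy lookup
 */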
int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
			unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct xfrm_policy *pol;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int npols = 0;
	int xfrm_nr;
	int pi;
	int reverse;
	struct flowi fl;
	u8 fl_dir;
	int xerr_idx = -1;

	reverse = dir & ~XFRM_POLICY_MASK;
	dir &= XFRM_POLICY_MASK;
	fl_dir = policy_to_flow_dir(dir);

	if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
		return 0;
	}

	nf_nat_decode_session(skb, &fl, family);

	/* First, check the used SAs against their selectors. */
	if (skb->sp) {
		int i;

		for (i = skb->sp->len-1; i >= 0; i--) {
			struct xfrm_state *x = skb->sp->xvec[i];
			if (!xfrm_selector_match(&x->sel, &fl, family)) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH);
				return 0;
			}
		}
	}

	pol = NULL;
	if (sk && sk->sk_policy[dir]) {
		pol = xfrm_sk_policy_lookup(sk, dir, &fl);
		if (IS_ERR(pol)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
			return 0;
		}
	}

	if (!pol) {
		struct flow_cache_object *flo;

		flo = flow_cache_lookup(net, &fl, family, fl_dir,
					xfrm_policy_lookup, NULL);
		if (IS_ERR_OR_NULL(flo))
			pol = ERR_CAST(flo);
		else
			pol = container_of(flo, struct xfrm_policy, flo);
	}

	if (IS_ERR(pol)) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
		return 0;
	}

	if (!pol) {
		if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) {
			xfrm_secpath_reject(xerr_idx, skb, &fl);
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS);
			return 0;
		}
		return 1;
	}

	pol->curlft.use_time = get_seconds();

	pols[0] = pol;
	npols++;
#ifdef CONFIG_XFRM_SUB_POLICY
	if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
		pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN,
						    &fl, family,
						    XFRM_POLICY_IN);
		if (pols[1]) {
			if (IS_ERR(pols[1])) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
				return 0;
			}
			pols[1]->curlft.use_time = get_seconds();
			npols++;
		}
	}
#endif

	if (pol->action == XFRM_POLICY_ALLOW) {
		struct sec_path *sp;
		static struct sec_path dummy;
		struct xfrm_tmpl *tp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl *stp[XFRM_MAX_DEPTH];
		struct xfrm_tmpl **tpp = tp;
		int ti = 0;
		int i, k;

		if ((sp = skb->sp) == NULL)
			sp = &dummy;

		for (pi = 0; pi < npols; pi++) {
			if (pols[pi] != pol &&
			    pols[pi]->action != XFRM_POLICY_ALLOW) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);
				goto reject;
			}
			if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
				goto reject_error;
			}
			for (i = 0; i < pols[pi]->xfrm_nr; i++)
				tpp[ti++] = &pols[pi]->xfrm_vec[i];
		}
		xfrm_nr = ti;
		if (npols > 1) {
			xfrm_tmpl_sort(stp, tpp, xfrm_nr, family, net);
			tpp = stp;
		}

		/* For each tunnel xfrm, find the first matching tmpl.
		 * For each tmpl before that, find the corresponding xfrm.
		 * Order is _important_.  Later we will implement
		 * some barriers, but at the moment barriers
		 * are implied between each two transformations.
		 */
		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family);
			if (k < 0) {
				if (k < -1)
					/* "-2 - errored_index" returned */
					xerr_idx = -(2+k);
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
				goto reject;
			}
		}

		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
			goto reject;
		}

		xfrm_pols_put(pols, npols);
		return 1;
	}
	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);

reject:
	xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
	xfrm_pols_put(pols, npols);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);
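/*
 * Illustrative sketch (not part of the original file): protocol handlers
 * normally reach __xfrm_policy_check() through the per-family inline
 * wrappers in net/xfrm.h, e.g. on IPv4 input:
 *
 *	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
 *		goto drop;	// SPD says this packet may not pass
 */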
int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct flowi fl;
	struct dst_entry *dst;
	int res = 1;

	if (xfrm_decode_session(skb, &fl, family) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
		return 0;
	}

	skb_dst_force(skb);

	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, XFRM_LOOKUP_QUEUE);
	if (IS_ERR(dst)) {
		res = 0;
		dst = NULL;
	}
	skb_dst_set(skb, dst);
	return res;
}
EXPORT_SYMBOL(__xfrm_route_forward);

/* Optimize later using cookies and generation ids. */

static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to DST_OBSOLETE_FORCE_CHK to force all XFRM destinations to
	 * get validated by dst_ops->check on every use.  We do this
	 * because when a normal route referenced by an XFRM dst is
	 * obsoleted we do not go looking around for all parent
	 * referencing XFRM dsts so that we can invalidate them.  It
	 * is just too much work.  Instead we make the checks here on
	 * every use.  For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example).  If X is marked obsolete, "A" will not
	 * notice.  That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a policy's bundle is pruned, we dst_free() the XFRM
	 * dst which causes its ->obsolete field to be set to
	 * DST_OBSOLETE_DEAD.  If an XFRM dst has been pruned like
	 * this, we want to force a new route lookup.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok((struct xfrm_dst *)dst);
}

void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = dev_net(dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible.  Such a dst must be popped before it reaches the
	 * point of failure. */
}
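/*
 * Illustrative sketch (not part of the original file): the revalidation
 * contract that xfrm_dst_check() above implements.  Users of a cached
 * bundle call dst_check() (net/dst.h) before each use; a NULL result
 * means the bundle went stale and a fresh xfrm_lookup() is required:
 *
 *	dst = dst_check(dst, cookie);	// invokes dst->ops->check()
 *	if (!dst) {
 *		// stale: drop the cached entry and re-resolve the flow
 *	}
 */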
static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

void xfrm_garbage_collect(struct net *net)
{
	flow_cache_flush(net);
}
EXPORT_SYMBOL(xfrm_garbage_collect);

static void xfrm_garbage_collect_deferred(struct net *net)
{
	flow_cache_flush_deferred(net);
}

static void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst_metric_set(dst, RTAX_MTU, pmtu);
	} while ((dst = dst->next));
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static int xfrm_bundle_ok(struct xfrm_dst *first)
{
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;
	u32 mtu;

	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;

	if (dst->flags & DST_XFRM_QUEUE)
		return 1;

	last = NULL;

	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->xfrm_genid != dst->xfrm->genid)
			return 0;
		if (xdst->num_pols > 0 &&
		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
			return 0;

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			last = xdst;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			last = xdst;
			xdst->route_mtu_cached = mtu;
		}

		dst = dst->child;
	} while (dst->xfrm);

	if (likely(!last))
		return 1;

	mtu = last->child_mtu_cached;
	for (;;) {
		dst = &last->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst_metric_set(dst, RTAX_MTU, mtu);

		if (last == first)
			break;

		last = (struct xfrm_dst *)last->u.dst.next;
		last->child_mtu_cached = mtu;
	}

	return 1;
}

static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
{
	return dst_metric_advmss(dst->path);
}

static unsigned int xfrm_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst_mtu(dst->path);
}
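/*
 * Illustrative sketch (not part of the original file): how the bundle MTU
 * is derived in xfrm_init_pmtu() and xfrm_bundle_ok() above.  At each
 * level the child's MTU is reduced by the transform's own overhead and
 * clamped by the cached route MTU (the numbers are made up):
 *
 *	mtu = dst_mtu(dst->child);		// e.g. 1500
 *	mtu = xfrm_state_mtu(dst->xfrm, mtu);	// minus ESP overhead, ~1440
 *	if (mtu > xdst->route_mtu_cached)
 *		mtu = xdst->route_mtu_cached;
 *	dst_metric_set(dst, RTAX_MTU, mtu);
 */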
static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	return dst->path->ops->neigh_lookup(dst, skb, daddr);
}

int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	struct net *net;
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	spin_lock(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		err = -EEXIST;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->default_advmss == NULL))
			dst_ops->default_advmss = xfrm_default_advmss;
		if (likely(dst_ops->mtu == NULL))
			dst_ops->mtu = xfrm_mtu;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(dst_ops->neigh_lookup == NULL))
			dst_ops->neigh_lookup = xfrm_neigh_lookup;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = xfrm_garbage_collect_deferred;
		rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo);
	}
	spin_unlock(&xfrm_policy_afinfo_lock);

	rtnl_lock();
	for_each_net(net) {
		struct dst_ops *xfrm_dst_ops;

		switch (afinfo->family) {
		case AF_INET:
			xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case AF_INET6:
			xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
			break;
#endif
		default:
			BUG();
		}
		*xfrm_dst_ops = *afinfo->dst_ops;
	}
	rtnl_unlock();

	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);

int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	spin_lock(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else
			RCU_INIT_POINTER(xfrm_policy_afinfo[afinfo->family],
					 NULL);
	}
	spin_unlock(&xfrm_policy_afinfo_lock);
	if (!err) {
		struct dst_ops *dst_ops = afinfo->dst_ops;

		synchronize_rcu();

		dst_ops->kmem_cachep = NULL;
		dst_ops->check = NULL;
		dst_ops->negative_advice = NULL;
		dst_ops->link_failure = NULL;
		afinfo->garbage_collect = NULL;
	}
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

static void __net_init xfrm_dst_ops_init(struct net *net)
{
	struct xfrm_policy_afinfo *afinfo;

	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET]);
	if (afinfo)
		net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
#if IS_ENABLED(CONFIG_IPV6)
	afinfo = rcu_dereference(xfrm_policy_afinfo[AF_INET6]);
	if (afinfo)
		net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
#endif
	rcu_read_unlock();
}
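/*
 * Illustrative sketch (not part of the original file): the shape of a
 * per-family registration.  The real tables live in the per-protocol
 * files (e.g. net/ipv4/xfrm4_policy.c); only a couple of fields are
 * shown here:
 *
 *	static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
 *		.family		= AF_INET,
 *		.dst_ops	= &xfrm4_dst_ops,
 *		...
 *	};
 *
 *	xfrm_policy_register_afinfo(&xfrm4_policy_afinfo);
 *
 * Any dst_ops hooks the backend leaves NULL (->check, ->mtu, ...) are
 * filled in with the generic xfrm_* defaults above.
 */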
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_DOWN:
		xfrm_garbage_collect(dev_net(dev));
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call	= xfrm_dev_event,
};

#ifdef CONFIG_XFRM_STATISTICS
static int __net_init xfrm_statistics_init(struct net *net)
{
	int rv;
	net->mib.xfrm_statistics = alloc_percpu(struct linux_xfrm_mib);
	if (!net->mib.xfrm_statistics)
		return -ENOMEM;
	rv = xfrm_proc_init(net);
	if (rv < 0)
		free_percpu(net->mib.xfrm_statistics);
	return rv;
}

static void xfrm_statistics_fini(struct net *net)
{
	xfrm_proc_fini(net);
	free_percpu(net->mib.xfrm_statistics);
}
#else
static int __net_init xfrm_statistics_init(struct net *net)
{
	return 0;
}

static void xfrm_statistics_fini(struct net *net)
{
}
#endif

static int __net_init xfrm_policy_init(struct net *net)
{
	unsigned int hmask, sz;
	int dir;

	if (net_eq(net, &init_net))
		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
	if (!net->xfrm.policy_byidx)
		goto out_byidx;
	net->xfrm.policy_idx_hmask = hmask;

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy_hash *htab;

		net->xfrm.policy_count[dir] = 0;
		net->xfrm.policy_count[XFRM_POLICY_MAX + dir] = 0;
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);

		htab = &net->xfrm.policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		if (!htab->table)
			goto out_bydst;
		htab->hmask = hmask;
		htab->dbits4 = 32;
		htab->sbits4 = 32;
		htab->dbits6 = 128;
		htab->sbits6 = 128;
	}
	net->xfrm.policy_hthresh.lbits4 = 32;
	net->xfrm.policy_hthresh.rbits4 = 32;
	net->xfrm.policy_hthresh.lbits6 = 128;
	net->xfrm.policy_hthresh.rbits6 = 128;

	seqlock_init(&net->xfrm.policy_hthresh.lock);

	INIT_LIST_HEAD(&net->xfrm.policy_all);
	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
	INIT_WORK(&net->xfrm.policy_hthresh.work, xfrm_hash_rebuild);
	if (net_eq(net, &init_net))
		register_netdevice_notifier(&xfrm_dev_notifier);
	return 0;

out_bydst:
	for (dir--; dir >= 0; dir--) {
		struct xfrm_policy_hash *htab;

		htab = &net->xfrm.policy_bydst[dir];
		xfrm_hash_free(htab->table, sz);
	}
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
out_byidx:
	return -ENOMEM;
}

static void xfrm_policy_fini(struct net *net)
{
	unsigned int sz;
	int dir;

	flush_work(&net->xfrm.policy_hash_work);
#ifdef CONFIG_XFRM_SUB_POLICY
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, false);
#endif
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, false);

	WARN_ON(!list_empty(&net->xfrm.policy_all));

	for (dir = 0; dir < XFRM_POLICY_MAX; dir++) {
		struct xfrm_policy_hash *htab;

		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));

		htab = &net->xfrm.policy_bydst[dir];
		sz = (htab->hmask + 1) * sizeof(struct hlist_head);
		WARN_ON(!hlist_empty(htab->table));
		xfrm_hash_free(htab->table, sz);
	}

	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
}
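/*
 * Illustrative sketch (not part of the original file): the hash tables
 * allocated in xfrm_policy_init() above are power-of-two sized and
 * addressed with a mask, so with the initial hmask = 8 - 1:
 *
 *	sz  = (hmask + 1) * sizeof(struct hlist_head);	// 8 buckets
 *	idx = hash & hmask;				// bucket selection
 *
 * xfrm_hash_resize() (scheduled via policy_hash_work) later grows the
 * table and rehashes when the policy count outgrows it.
 */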
static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	rv = xfrm_statistics_init(net);
	if (rv < 0)
		goto out_statistics;
	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	xfrm_dst_ops_init(net);
	rv = xfrm_sysctl_init(net);
	if (rv < 0)
		goto out_sysctl;
	rv = flow_cache_init(net);
	if (rv < 0)
		goto out;

	/* Initialize the per-net locks here */
	spin_lock_init(&net->xfrm.xfrm_state_lock);
	rwlock_init(&net->xfrm.xfrm_policy_lock);
	mutex_init(&net->xfrm.xfrm_cfg_mutex);

	return 0;

out:
	xfrm_sysctl_fini(net);
out_sysctl:
	xfrm_policy_fini(net);
out_policy:
	xfrm_state_fini(net);
out_state:
	xfrm_statistics_fini(net);
out_statistics:
	return rv;
}

static void __net_exit xfrm_net_exit(struct net *net)
{
	flow_cache_fini(net);
	xfrm_sysctl_fini(net);
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
	xfrm_statistics_fini(net);
}

static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};

void __init xfrm_init(void)
{
	register_pernet_subsys(&xfrm_net_ops);
	xfrm_input_init();
}

#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}

void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif
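/*
 * Illustrative sketch (not part of the original file): the tail of an
 * SPD-add event as the format strings above would render it (all values
 * are made up, and surrounding audit fields are omitted):
 *
 *	op=SPD-add res=1 sec_alg=1 sec_doi=0 sec_obj=system_u:object_r:x
 *	src=10.1.0.0 src_prefixlen=16 dst=10.2.0.1
 *
 * Note that src_prefixlen/dst_prefixlen are emitted only for selectors
 * narrower than a full host address (the != 32 / != 128 checks above).
 */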
#ifdef CONFIG_XFRM_MIGRATE
static bool xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
					const struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_equal(&sel_tgt->daddr, &sel_cmp->daddr,
				    sel_cmp->family) &&
		    xfrm_addr_equal(&sel_tgt->saddr, &sel_cmp->saddr,
				    sel_cmp->family) &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return true;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return true;
		}
	}
	return false;
}

static struct xfrm_policy *xfrm_migrate_policy_find(const struct xfrm_selector *sel,
						    u8 dir, u8 type, struct net *net)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_head *chain;
	u32 priority = ~0U;

	read_lock_bh(&net->xfrm.xfrm_policy_lock); /*FIXME*/
	chain = policy_hash_direct(net, &sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &net->xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, chain, bydst) {
		if ((pol->priority >= priority) && ret)
			break;

		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			break;
		}
	}

	xfrm_pol_hold(ret);

	read_unlock_bh(&net->xfrm.xfrm_policy_lock);

	return ret;
}

static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_equal(&t->id.daddr, &m->old_daddr,
					    m->old_family) &&
			    xfrm_addr_equal(&t->saddr, &m->old_saddr,
					    m->old_family)) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* in case of transport mode, the template does not
			 * store any IP addresses, hence we just compare
			 * mode and protocol */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}

/* update the endpoint address(es) of the template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			atomic_inc(&pol->genid);
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}
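/*
 * Illustrative sketch (not part of the original file): a single-entry
 * migration that moves a tunnel's outer endpoints, as a KM might request
 * via xfrm_migrate() below (addresses and the selector are assumed):
 *
 *	struct xfrm_migrate mig = {
 *		.proto		= IPPROTO_ESP,
 *		.mode		= XFRM_MODE_TUNNEL,
 *		.old_family	= AF_INET,
 *		.new_family	= AF_INET,
 *		// .old_daddr/.old_saddr = current outer addresses
 *		// .new_daddr/.new_saddr = new outer addresses
 *	};
 *
 *	err = xfrm_migrate(&sel, XFRM_POLICY_OUT, XFRM_POLICY_TYPE_MAIN,
 *			   &mig, 1, NULL, net);
 */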
static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if (xfrm_addr_equal(&m[i].old_daddr, &m[i].new_daddr,
				    m[i].old_family) &&
		    xfrm_addr_equal(&m[i].old_saddr, &m[i].new_saddr,
				    m[i].old_family))
			return -EINVAL;
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}

int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k, struct net *net)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp, net))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			if ((xc = xfrm_state_migrate(x, mp))) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k);

	xfrm_pol_put(pol);

	return 0;
out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif