// SPDX-License-Identifier: GPL-2.0-only
/*
 * xfrm_state.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific functions
 *	Derek Atkins <derek@ihtfp.com>
 *		Add UDP Encapsulation
 *
 */

#include <linux/compat.h>
#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <linux/uaccess.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>

#include <crypto/aead.h>

#include "xfrm_hash.h"

#define xfrm_state_deref_prot(table, net) \
	rcu_dereference_protected((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
#define xfrm_state_deref_check(table, net) \
	rcu_dereference_check((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))

static void xfrm_state_gc_task(struct work_struct *work);

/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */

static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static struct kmem_cache *xfrm_state_cache __ro_after_init;

static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
static HLIST_HEAD(xfrm_state_gc_list);
static HLIST_HEAD(xfrm_state_dev_gc_list);

static inline bool xfrm_state_hold_rcu(struct xfrm_state __rcu *x)
{
	return refcount_inc_not_zero(&x->refcnt);
}

static inline unsigned int xfrm_dst_hash(struct net *net,
					 const xfrm_address_t *daddr,
					 const xfrm_address_t *saddr,
					 u32 reqid,
					 unsigned short family)
{
	lockdep_assert_held(&net->xfrm.xfrm_state_lock);

	return __xfrm_dst_hash(daddr, saddr, reqid, family, net->xfrm.state_hmask);
}

static inline unsigned int xfrm_src_hash(struct net *net,
					 const xfrm_address_t *daddr,
					 const xfrm_address_t *saddr,
					 unsigned short family)
{
	lockdep_assert_held(&net->xfrm.xfrm_state_lock);

	return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask);
}

static inline unsigned int
xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
	      __be32 spi, u8 proto, unsigned short family)
{
	lockdep_assert_held(&net->xfrm.xfrm_state_lock);

	return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
}

static unsigned int xfrm_seq_hash(struct net *net, u32 seq)
{
	lockdep_assert_held(&net->xfrm.xfrm_state_lock);

	return __xfrm_seq_hash(seq, net->xfrm.state_hmask);
}

#define XFRM_STATE_INSERT(by, _n, _h, _type)				\
	{								\
		struct xfrm_state *_x = NULL;				\
									\
		if (_type != XFRM_DEV_OFFLOAD_PACKET) {			\
			hlist_for_each_entry_rcu(_x, _h, by) {		\
				if (_x->xso.type == XFRM_DEV_OFFLOAD_PACKET) \
					continue;			\
				break;					\
			}						\
		}							\
									\
		if (!_x || _x->xso.type == XFRM_DEV_OFFLOAD_PACKET)	\
			/* SAD is empty or consists of HW SAs only */	\
			hlist_add_head_rcu(_n, _h);			\
		else							\
			hlist_add_before_rcu(_n, &_x->by);		\
	}
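
/* Chain-ordering invariant: XFRM_STATE_INSERT() keeps states with
 * XFRM_DEV_OFFLOAD_PACKET offload at the head of each hash chain, with
 * software states behind them, so lookup loops can stop early.  A minimal
 * sketch of a walker relying on the invariant (illustrative only, not part
 * of the build):
 *
 *	hlist_for_each_entry_rcu(x, h, byspi) {
 *		if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
 *			break;	// past the HW states; SW-only from here on
 *		// ... match only against HW states here ...
 *	}
 */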

static void xfrm_hash_transfer(struct hlist_head *list,
			       struct hlist_head *ndsttable,
			       struct hlist_head *nsrctable,
			       struct hlist_head *nspitable,
			       struct hlist_head *nseqtable,
			       unsigned int nhashmask)
{
	struct hlist_node *tmp;
	struct xfrm_state *x;

	hlist_for_each_entry_safe(x, tmp, list, bydst) {
		unsigned int h;

		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
				    x->props.reqid, x->props.family,
				    nhashmask);
		XFRM_STATE_INSERT(bydst, &x->bydst, ndsttable + h, x->xso.type);

		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
				    x->props.family,
				    nhashmask);
		XFRM_STATE_INSERT(bysrc, &x->bysrc, nsrctable + h, x->xso.type);

		if (x->id.spi) {
			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
					    x->id.proto, x->props.family,
					    nhashmask);
			XFRM_STATE_INSERT(byspi, &x->byspi, nspitable + h,
					  x->xso.type);
		}

		if (x->km.seq) {
			h = __xfrm_seq_hash(x->km.seq, nhashmask);
			XFRM_STATE_INSERT(byseq, &x->byseq, nseqtable + h,
					  x->xso.type);
		}
	}
}

static unsigned long xfrm_hash_new_size(unsigned int state_hmask)
{
	return ((state_hmask + 1) << 1) * sizeof(struct hlist_head);
}

static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.state_hash_work);
	struct hlist_head *ndst, *nsrc, *nspi, *nseq, *odst, *osrc, *ospi, *oseq;
	unsigned long nsize, osize;
	unsigned int nhashmask, ohashmask;
	int i;

	nsize = xfrm_hash_new_size(net->xfrm.state_hmask);
	ndst = xfrm_hash_alloc(nsize);
	if (!ndst)
		return;
	nsrc = xfrm_hash_alloc(nsize);
	if (!nsrc) {
		xfrm_hash_free(ndst, nsize);
		return;
	}
	nspi = xfrm_hash_alloc(nsize);
	if (!nspi) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		return;
	}
	nseq = xfrm_hash_alloc(nsize);
	if (!nseq) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		xfrm_hash_free(nspi, nsize);
		return;
	}

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	write_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);

	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
	odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
	for (i = net->xfrm.state_hmask; i >= 0; i--)
		xfrm_hash_transfer(odst + i, ndst, nsrc, nspi, nseq, nhashmask);

	osrc = xfrm_state_deref_prot(net->xfrm.state_bysrc, net);
	ospi = xfrm_state_deref_prot(net->xfrm.state_byspi, net);
	oseq = xfrm_state_deref_prot(net->xfrm.state_byseq, net);
	ohashmask = net->xfrm.state_hmask;

	rcu_assign_pointer(net->xfrm.state_bydst, ndst);
	rcu_assign_pointer(net->xfrm.state_bysrc, nsrc);
	rcu_assign_pointer(net->xfrm.state_byspi, nspi);
	rcu_assign_pointer(net->xfrm.state_byseq, nseq);
	net->xfrm.state_hmask = nhashmask;

	write_seqcount_end(&net->xfrm.xfrm_state_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	osize = (ohashmask + 1) * sizeof(struct hlist_head);

	synchronize_rcu();

	xfrm_hash_free(odst, osize);
	xfrm_hash_free(osrc, osize);
	xfrm_hash_free(ospi, osize);
	xfrm_hash_free(oseq, osize);
}

static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO];

static DEFINE_SPINLOCK(xfrm_state_gc_lock);
static DEFINE_SPINLOCK(xfrm_state_dev_gc_lock);

int __xfrm_state_delete(struct xfrm_state *x);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
static bool km_is_alive(const struct km_event *c);
void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
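
/* Table growth doubles the bucket count: xfrm_hash_new_size() above returns
 * the byte size for (state_hmask + 1) * 2 buckets.  For example, with
 * state_hmask == 1023 (1024 buckets) and 8-byte hlist heads on 64-bit,
 * nsize = 2048 * 8 = 16 KiB, and the nhashmask derived from it in
 * xfrm_hash_resize() is 2047.  The old tables are freed only after
 * synchronize_rcu(), because lockless readers may still be walking them.
 */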

int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	int err = 0;

	if (!afinfo)
		return -EAFNOSUPPORT;

#define X(afi, T, name) do {			\
		WARN_ON((afi)->type_ ## name);	\
		(afi)->type_ ## name = (T);	\
	} while (0)

	switch (type->proto) {
	case IPPROTO_COMP:
		X(afinfo, type, comp);
		break;
	case IPPROTO_AH:
		X(afinfo, type, ah);
		break;
	case IPPROTO_ESP:
		X(afinfo, type, esp);
		break;
	case IPPROTO_IPIP:
		X(afinfo, type, ipip);
		break;
	case IPPROTO_DSTOPTS:
		X(afinfo, type, dstopts);
		break;
	case IPPROTO_ROUTING:
		X(afinfo, type, routing);
		break;
	case IPPROTO_IPV6:
		X(afinfo, type, ipip6);
		break;
	default:
		WARN_ON(1);
		err = -EPROTONOSUPPORT;
		break;
	}
#undef X
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(xfrm_register_type);

void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return;

#define X(afi, T, name) do {				\
		WARN_ON((afi)->type_ ## name != (T));	\
		(afi)->type_ ## name = NULL;		\
	} while (0)

	switch (type->proto) {
	case IPPROTO_COMP:
		X(afinfo, type, comp);
		break;
	case IPPROTO_AH:
		X(afinfo, type, ah);
		break;
	case IPPROTO_ESP:
		X(afinfo, type, esp);
		break;
	case IPPROTO_IPIP:
		X(afinfo, type, ipip);
		break;
	case IPPROTO_DSTOPTS:
		X(afinfo, type, dstopts);
		break;
	case IPPROTO_ROUTING:
		X(afinfo, type, routing);
		break;
	case IPPROTO_IPV6:
		X(afinfo, type, ipip6);
		break;
	default:
		WARN_ON(1);
		break;
	}
#undef X
	rcu_read_unlock();
}
EXPORT_SYMBOL(xfrm_unregister_type);

static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	const struct xfrm_type *type = NULL;
	struct xfrm_state_afinfo *afinfo;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;

	switch (proto) {
	case IPPROTO_COMP:
		type = afinfo->type_comp;
		break;
	case IPPROTO_AH:
		type = afinfo->type_ah;
		break;
	case IPPROTO_ESP:
		type = afinfo->type_esp;
		break;
	case IPPROTO_IPIP:
		type = afinfo->type_ipip;
		break;
	case IPPROTO_DSTOPTS:
		type = afinfo->type_dstopts;
		break;
	case IPPROTO_ROUTING:
		type = afinfo->type_routing;
		break;
	case IPPROTO_IPV6:
		type = afinfo->type_ipip6;
		break;
	default:
		break;
	}

	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;

	rcu_read_unlock();

	if (!type && !modload_attempted) {
		request_module("xfrm-type-%d-%d", family, proto);
		modload_attempted = 1;
		goto retry;
	}

	return type;
}

static void xfrm_put_type(const struct xfrm_type *type)
{
	module_put(type->owner);
}
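
/* xfrm_get_type() autoloads protocol modules via the
 * "xfrm-type-<family>-<proto>" alias that implementations declare with
 * MODULE_ALIAS_XFRM_TYPE().  A minimal sketch of how a type plugs into the
 * tables above (illustrative, loosely modelled on ESP; the my_* helpers
 * are hypothetical):
 *
 *	static const struct xfrm_type my_esp_type = {
 *		.owner		= THIS_MODULE,
 *		.proto		= IPPROTO_ESP,
 *		.init_state	= my_init_state,
 *		.destructor	= my_destructor,
 *		.input		= my_input,
 *		.output		= my_output,
 *	};
 *
 *	// module init:  xfrm_register_type(&my_esp_type, AF_INET);
 *	// module exit:  xfrm_unregister_type(&my_esp_type, AF_INET);
 *	MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);
 */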

int xfrm_register_type_offload(const struct xfrm_type_offload *type,
			       unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	switch (type->proto) {
	case IPPROTO_ESP:
		WARN_ON(afinfo->type_offload_esp);
		afinfo->type_offload_esp = type;
		break;
	default:
		WARN_ON(1);
		err = -EPROTONOSUPPORT;
		break;
	}

	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(xfrm_register_type_offload);

void xfrm_unregister_type_offload(const struct xfrm_type_offload *type,
				  unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return;

	switch (type->proto) {
	case IPPROTO_ESP:
		WARN_ON(afinfo->type_offload_esp != type);
		afinfo->type_offload_esp = NULL;
		break;
	default:
		WARN_ON(1);
		break;
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xfrm_unregister_type_offload);

void xfrm_set_type_offload(struct xfrm_state *x, bool try_load)
{
	const struct xfrm_type_offload *type = NULL;
	struct xfrm_state_afinfo *afinfo;

retry:
	afinfo = xfrm_state_get_afinfo(x->props.family);
	if (unlikely(afinfo == NULL))
		goto out;

	switch (x->id.proto) {
	case IPPROTO_ESP:
		type = afinfo->type_offload_esp;
		break;
	default:
		break;
	}

	if ((type && !try_module_get(type->owner)))
		type = NULL;

	rcu_read_unlock();

	if (!type && try_load) {
		request_module("xfrm-offload-%d-%d", x->props.family,
			       x->id.proto);
		try_load = false;
		goto retry;
	}

out:
	x->type_offload = type;
}
EXPORT_SYMBOL(xfrm_set_type_offload);

static const struct xfrm_mode xfrm4_mode_map[XFRM_MODE_MAX] = {
	[XFRM_MODE_BEET] = {
		.encap = XFRM_MODE_BEET,
		.flags = XFRM_MODE_FLAG_TUNNEL,
		.family = AF_INET,
	},
	[XFRM_MODE_TRANSPORT] = {
		.encap = XFRM_MODE_TRANSPORT,
		.family = AF_INET,
	},
	[XFRM_MODE_TUNNEL] = {
		.encap = XFRM_MODE_TUNNEL,
		.flags = XFRM_MODE_FLAG_TUNNEL,
		.family = AF_INET,
	},
	[XFRM_MODE_IPTFS] = {
		.encap = XFRM_MODE_IPTFS,
		.flags = XFRM_MODE_FLAG_TUNNEL,
		.family = AF_INET,
	},
};

static const struct xfrm_mode xfrm6_mode_map[XFRM_MODE_MAX] = {
	[XFRM_MODE_BEET] = {
		.encap = XFRM_MODE_BEET,
		.flags = XFRM_MODE_FLAG_TUNNEL,
		.family = AF_INET6,
	},
	[XFRM_MODE_ROUTEOPTIMIZATION] = {
		.encap = XFRM_MODE_ROUTEOPTIMIZATION,
		.family = AF_INET6,
	},
	[XFRM_MODE_TRANSPORT] = {
		.encap = XFRM_MODE_TRANSPORT,
		.family = AF_INET6,
	},
	[XFRM_MODE_TUNNEL] = {
		.encap = XFRM_MODE_TUNNEL,
		.flags = XFRM_MODE_FLAG_TUNNEL,
		.family = AF_INET6,
	},
	[XFRM_MODE_IPTFS] = {
		.encap = XFRM_MODE_IPTFS,
		.flags = XFRM_MODE_FLAG_TUNNEL,
		.family = AF_INET6,
	},
};

static const struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
	const struct xfrm_mode *mode;

	if (unlikely(encap >= XFRM_MODE_MAX))
		return NULL;

	switch (family) {
	case AF_INET:
		mode = &xfrm4_mode_map[encap];
		if (mode->family == family)
			return mode;
		break;
	case AF_INET6:
		mode = &xfrm6_mode_map[encap];
		if (mode->family == family)
			return mode;
		break;
	default:
		break;
	}

	return NULL;
}

static const struct xfrm_mode_cbs __rcu *xfrm_mode_cbs_map[XFRM_MODE_MAX];
static DEFINE_SPINLOCK(xfrm_mode_cbs_map_lock);

int xfrm_register_mode_cbs(u8 mode, const struct xfrm_mode_cbs *mode_cbs)
{
	if (mode >= XFRM_MODE_MAX)
		return -EINVAL;

	spin_lock_bh(&xfrm_mode_cbs_map_lock);
	rcu_assign_pointer(xfrm_mode_cbs_map[mode], mode_cbs);
	spin_unlock_bh(&xfrm_mode_cbs_map_lock);

	return 0;
}
EXPORT_SYMBOL(xfrm_register_mode_cbs);

void xfrm_unregister_mode_cbs(u8 mode)
{
	if (mode >= XFRM_MODE_MAX)
		return;

	spin_lock_bh(&xfrm_mode_cbs_map_lock);
	RCU_INIT_POINTER(xfrm_mode_cbs_map[mode], NULL);
	spin_unlock_bh(&xfrm_mode_cbs_map_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL(xfrm_unregister_mode_cbs);

static const struct xfrm_mode_cbs *xfrm_get_mode_cbs(u8 mode)
{
	const struct xfrm_mode_cbs *cbs;
	bool try_load = true;

	if (mode >= XFRM_MODE_MAX)
		return NULL;

retry:
	rcu_read_lock();

	cbs = rcu_dereference(xfrm_mode_cbs_map[mode]);
	if (cbs && !try_module_get(cbs->owner))
		cbs = NULL;

	rcu_read_unlock();

	if (mode == XFRM_MODE_IPTFS && !cbs && try_load) {
		request_module("xfrm-iptfs");
		try_load = false;
		goto retry;
	}

	return cbs;
}

void xfrm_state_free(struct xfrm_state *x)
{
	kmem_cache_free(xfrm_state_cache, x);
}
EXPORT_SYMBOL(xfrm_state_free);

static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
	if (x->mode_cbs && x->mode_cbs->destroy_state)
		x->mode_cbs->destroy_state(x);
	hrtimer_cancel(&x->mtimer);
	timer_delete_sync(&x->rtimer);
	kfree_sensitive(x->aead);
	kfree_sensitive(x->aalg);
	kfree_sensitive(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	kfree(x->coaddr);
	kfree(x->replay_esn);
	kfree(x->preplay_esn);
	xfrm_unset_type_offload(x);
	if (x->type) {
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	if (x->xfrag.page)
		put_page(x->xfrag.page);
	xfrm_dev_state_free(x);
	security_xfrm_state_free(x);
	xfrm_state_free(x);
}

static void xfrm_state_gc_task(struct work_struct *work)
{
	struct xfrm_state *x;
	struct hlist_node *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_move_list(&xfrm_state_gc_list, &gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	synchronize_rcu();

	hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
		xfrm_state_gc_destroy(x);
}
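
/* Note on the GC ordering above: states are queued on xfrm_state_gc_list
 * while lockless readers may still hold pointers obtained from the hash
 * tables, so xfrm_state_gc_task() moves the whole list aside under the
 * spinlock and runs a single synchronize_rcu() for the batch before
 * xfrm_state_gc_destroy() frees anything, amortizing the grace period
 * over every state queued since the last run.
 */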

static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
{
	struct xfrm_state *x = container_of(me, struct xfrm_state, mtimer);
	enum hrtimer_restart ret = HRTIMER_NORESTART;
	time64_t now = ktime_get_real_seconds();
	time64_t next = TIME64_MAX;
	int warn = 0;
	int err = 0;

	spin_lock(&x->lock);
	xfrm_dev_state_update_stats(x);

	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		time64_t tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0) {
			if (x->xflags & XFRM_SOFT_EXPIRE) {
				/* enter hard expire without soft expire first?!
				 * setting a new date could trigger this.
				 * workaround: fix x->curlft.add_time by below:
				 */
				x->curlft.add_time = now - x->saved_tmo - 1;
				tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
			} else
				goto expired;
		}
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		time64_t tmo = x->lft.hard_use_expires_seconds +
			(READ_ONCE(x->curlft.use_time) ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->km.dying)
		goto resched;
	if (x->lft.soft_add_expires_seconds) {
		time64_t tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			x->xflags &= ~XFRM_SOFT_EXPIRE;
		} else if (tmo < next) {
			next = tmo;
			x->xflags |= XFRM_SOFT_EXPIRE;
			x->saved_tmo = tmo;
		}
	}
	if (x->lft.soft_use_expires_seconds) {
		time64_t tmo = x->lft.soft_use_expires_seconds +
			(READ_ONCE(x->curlft.use_time) ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0, 0);
resched:
	if (next != TIME64_MAX) {
		hrtimer_forward_now(&x->mtimer, ktime_set(next, 0));
		ret = HRTIMER_RESTART;
	}

	goto out;

expired:
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0)
		x->km.state = XFRM_STATE_EXPIRED;

	err = __xfrm_state_delete(x);
	if (!err)
		km_state_expired(x, 1, 0);

	xfrm_audit_state_delete(x, err ? 0 : 1, true);

out:
	spin_unlock(&x->lock);
	return ret;
}

static void xfrm_replay_timer_handler(struct timer_list *t);

struct xfrm_state *xfrm_state_alloc(struct net *net)
{
	struct xfrm_state *x;

	x = kmem_cache_zalloc(xfrm_state_cache, GFP_ATOMIC);

	if (x) {
		write_pnet(&x->xs_net, net);
		refcount_set(&x->refcnt, 1);
		atomic_set(&x->tunnel_users, 0);
		INIT_LIST_HEAD(&x->km.all);
		INIT_HLIST_NODE(&x->state_cache);
		INIT_HLIST_NODE(&x->bydst);
		INIT_HLIST_NODE(&x->bysrc);
		INIT_HLIST_NODE(&x->byspi);
		INIT_HLIST_NODE(&x->byseq);
		hrtimer_setup(&x->mtimer, xfrm_timer_handler, CLOCK_BOOTTIME,
			      HRTIMER_MODE_ABS_SOFT);
		timer_setup(&x->rtimer, xfrm_replay_timer_handler, 0);
		x->curlft.add_time = ktime_get_real_seconds();
		x->lft.soft_byte_limit = XFRM_INF;
		x->lft.soft_packet_limit = XFRM_INF;
		x->lft.hard_byte_limit = XFRM_INF;
		x->lft.hard_packet_limit = XFRM_INF;
		x->replay_maxage = 0;
		x->replay_maxdiff = 0;
		x->pcpu_num = UINT_MAX;
		spin_lock_init(&x->lock);
		x->mode_data = NULL;
	}
	return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);
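
/* Worked example of the expiry arithmetic in xfrm_timer_handler(): with
 * hard_add_expires_seconds == 3600 and add_time == now - 3500, tmo =
 * 3600 + (now - 3500) - now = 100, so the hrtimer is re-armed 100 seconds
 * out unless a use-time or soft limit yields a smaller "next".  A soft
 * expiry only sets km.dying and notifies the key manager through
 * km_state_expired(x, 0, 0); a hard expiry deletes the state.
 */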

#ifdef CONFIG_XFRM_OFFLOAD
void xfrm_dev_state_delete(struct xfrm_state *x)
{
	struct xfrm_dev_offload *xso = &x->xso;
	struct net_device *dev = READ_ONCE(xso->dev);

	if (dev) {
		dev->xfrmdev_ops->xdo_dev_state_delete(dev, x);
		spin_lock_bh(&xfrm_state_dev_gc_lock);
		hlist_add_head(&x->dev_gclist, &xfrm_state_dev_gc_list);
		spin_unlock_bh(&xfrm_state_dev_gc_lock);
	}
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_delete);

void xfrm_dev_state_free(struct xfrm_state *x)
{
	struct xfrm_dev_offload *xso = &x->xso;
	struct net_device *dev = READ_ONCE(xso->dev);

	if (dev && dev->xfrmdev_ops) {
		spin_lock_bh(&xfrm_state_dev_gc_lock);
		if (!hlist_unhashed(&x->dev_gclist))
			hlist_del(&x->dev_gclist);
		spin_unlock_bh(&xfrm_state_dev_gc_lock);

		if (dev->xfrmdev_ops->xdo_dev_state_free)
			dev->xfrmdev_ops->xdo_dev_state_free(dev, x);
		WRITE_ONCE(xso->dev, NULL);
		xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
		netdev_put(dev, &xso->dev_tracker);
	}
}
#endif

void __xfrm_state_destroy(struct xfrm_state *x)
{
	WARN_ON(x->km.state != XFRM_STATE_DEAD);

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_add_head(&x->gclist, &xfrm_state_gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);
	schedule_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(__xfrm_state_destroy);

static void xfrm_state_delete_tunnel(struct xfrm_state *x);
int __xfrm_state_delete(struct xfrm_state *x)
{
	struct net *net = xs_net(x);
	int err = -ESRCH;

	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;

		spin_lock(&net->xfrm.xfrm_state_lock);
		list_del(&x->km.all);
		hlist_del_rcu(&x->bydst);
		hlist_del_rcu(&x->bysrc);
		if (x->km.seq)
			hlist_del_rcu(&x->byseq);
		if (!hlist_unhashed(&x->state_cache))
			hlist_del_rcu(&x->state_cache);
		if (!hlist_unhashed(&x->state_cache_input))
			hlist_del_rcu(&x->state_cache_input);

		if (x->id.spi)
			hlist_del_rcu(&x->byspi);
		net->xfrm.state_num--;
		xfrm_nat_keepalive_state_updated(x);
		spin_unlock(&net->xfrm.xfrm_state_lock);

		xfrm_dev_state_delete(x);

		xfrm_state_delete_tunnel(x);

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		xfrm_state_put(x);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);

int xfrm_state_delete(struct xfrm_state *x)
{
	int err;

	spin_lock_bh(&x->lock);
	err = __xfrm_state_delete(x);
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_state_delete);
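
/* Reference-count contract: the reference created by xfrm_state_alloc() is
 * the one dropped by __xfrm_state_delete(), so a caller that keeps using a
 * state across deletion must pin it first.  A minimal sketch of the pattern
 * used by the flush loops below (illustrative):
 *
 *	xfrm_state_hold(x);			// pin across the unlock
 *	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
 *	err = xfrm_state_delete(x);		// drops the alloc reference
 *	xfrm_state_put(x);			// drop our pin
 *	spin_lock_bh(&net->xfrm.xfrm_state_lock);
 */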

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
{
	int i, err = 0;

	for (i = 0; i <= net->xfrm.state_hmask; i++) {
		struct xfrm_state *x;

		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
			if (xfrm_id_proto_match(x->id.proto, proto) &&
			   (err = security_xfrm_state_delete(x)) != 0) {
				xfrm_audit_state_delete(x, 0, task_valid);
				return err;
			}
		}
	}

	return err;
}

static inline int
xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
{
	int i, err = 0;

	for (i = 0; i <= net->xfrm.state_hmask; i++) {
		struct xfrm_state *x;
		struct xfrm_dev_offload *xso;

		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
			xso = &x->xso;

			if (xso->dev == dev &&
			   (err = security_xfrm_state_delete(x)) != 0) {
				xfrm_audit_state_delete(x, 0, task_valid);
				return err;
			}
		}
	}

	return err;
}
#else
static inline int
xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
{
	return 0;
}

static inline int
xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
{
	return 0;
}
#endif

int xfrm_state_flush(struct net *net, u8 proto, bool task_valid)
{
	int i, err = 0, cnt = 0;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	err = xfrm_state_flush_secctx_check(net, proto, task_valid);
	if (err)
		goto out;

	err = -ESRCH;
	for (i = 0; i <= net->xfrm.state_hmask; i++) {
		struct xfrm_state *x;
restart:
		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    xfrm_id_proto_match(x->id.proto, proto)) {
				xfrm_state_hold(x);
				spin_unlock_bh(&net->xfrm.xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_audit_state_delete(x, err ? 0 : 1,
							task_valid);
				xfrm_state_put(x);
				if (!err)
					cnt++;

				spin_lock_bh(&net->xfrm.xfrm_state_lock);
				goto restart;
			}
		}
	}
out:
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	if (cnt)
		err = 0;

	return err;
}
EXPORT_SYMBOL(xfrm_state_flush);

int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid)
{
	struct xfrm_state *x;
	struct hlist_node *tmp;
	struct xfrm_dev_offload *xso;
	int i, err = 0, cnt = 0;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	err = xfrm_dev_state_flush_secctx_check(net, dev, task_valid);
	if (err)
		goto out;

	err = -ESRCH;
	for (i = 0; i <= net->xfrm.state_hmask; i++) {
restart:
		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
			xso = &x->xso;

			if (!xfrm_state_kern(x) && xso->dev == dev) {
				xfrm_state_hold(x);
				spin_unlock_bh(&net->xfrm.xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_dev_state_free(x);

				xfrm_audit_state_delete(x, err ? 0 : 1,
							task_valid);
				xfrm_state_put(x);
				if (!err)
					cnt++;

				spin_lock_bh(&net->xfrm.xfrm_state_lock);
				goto restart;
			}
		}
	}
	if (cnt)
		err = 0;

out:
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	spin_lock_bh(&xfrm_state_dev_gc_lock);
restart_gc:
	hlist_for_each_entry_safe(x, tmp, &xfrm_state_dev_gc_list, dev_gclist) {
		xso = &x->xso;

		if (xso->dev == dev) {
			spin_unlock_bh(&xfrm_state_dev_gc_lock);
			xfrm_dev_state_free(x);
			spin_lock_bh(&xfrm_state_dev_gc_lock);
			goto restart_gc;
		}

	}
	spin_unlock_bh(&xfrm_state_dev_gc_lock);

	xfrm_flush_gc();

	return err;
}
EXPORT_SYMBOL(xfrm_dev_state_flush);
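
/* Both flush loops above drop xfrm_state_lock around xfrm_state_delete(),
 * since deletion takes per-state locks and may call into drivers.  The
 * hash chain can therefore change while the lock is dropped, and the
 * "goto restart" re-walks the current bucket from its head once the lock
 * is re-taken; quadratic in the worst case, but flushes are rare,
 * administrative operations.
 */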

void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
{
	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	si->sadcnt = net->xfrm.state_num;
	si->sadhcnt = net->xfrm.state_hmask + 1;
	si->sadhmcnt = xfrm_state_hashmax;
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_sad_getinfo);

static void
__xfrm4_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	sel->daddr.a4 = fl4->daddr;
	sel->saddr.a4 = fl4->saddr;
	sel->dport = xfrm_flowi_dport(fl, &fl4->uli);
	sel->dport_mask = htons(0xffff);
	sel->sport = xfrm_flowi_sport(fl, &fl4->uli);
	sel->sport_mask = htons(0xffff);
	sel->family = AF_INET;
	sel->prefixlen_d = 32;
	sel->prefixlen_s = 32;
	sel->proto = fl4->flowi4_proto;
	sel->ifindex = fl4->flowi4_oif;
}

static void
__xfrm6_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	/* Initialize temporary selector matching only to current session. */
	*(struct in6_addr *)&sel->daddr = fl6->daddr;
	*(struct in6_addr *)&sel->saddr = fl6->saddr;
	sel->dport = xfrm_flowi_dport(fl, &fl6->uli);
	sel->dport_mask = htons(0xffff);
	sel->sport = xfrm_flowi_sport(fl, &fl6->uli);
	sel->sport_mask = htons(0xffff);
	sel->family = AF_INET6;
	sel->prefixlen_d = 128;
	sel->prefixlen_s = 128;
	sel->proto = fl6->flowi6_proto;
	sel->ifindex = fl6->flowi6_oif;
}

static void
xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
		    const struct xfrm_tmpl *tmpl,
		    const xfrm_address_t *daddr, const xfrm_address_t *saddr,
		    unsigned short family)
{
	switch (family) {
	case AF_INET:
		__xfrm4_init_tempsel(&x->sel, fl);
		break;
	case AF_INET6:
		__xfrm6_init_tempsel(&x->sel, fl);
		break;
	}

	x->id = tmpl->id;

	switch (tmpl->encap_family) {
	case AF_INET:
		if (x->id.daddr.a4 == 0)
			x->id.daddr.a4 = daddr->a4;
		x->props.saddr = tmpl->saddr;
		if (x->props.saddr.a4 == 0)
			x->props.saddr.a4 = saddr->a4;
		break;
	case AF_INET6:
		if (ipv6_addr_any((struct in6_addr *)&x->id.daddr))
			memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr));
		memcpy(&x->props.saddr, &tmpl->saddr, sizeof(x->props.saddr));
		if (ipv6_addr_any((struct in6_addr *)&x->props.saddr))
			memcpy(&x->props.saddr, saddr, sizeof(x->props.saddr));
		break;
	}

	x->props.mode = tmpl->mode;
	x->props.reqid = tmpl->reqid;
	x->props.family = tmpl->encap_family;
}

struct xfrm_hash_state_ptrs {
	const struct hlist_head *bydst;
	const struct hlist_head *bysrc;
	const struct hlist_head *byspi;
	unsigned int hmask;
};

static void xfrm_hash_ptrs_get(const struct net *net, struct xfrm_hash_state_ptrs *ptrs)
{
	unsigned int sequence;

	do {
		sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);

		ptrs->bydst = xfrm_state_deref_check(net->xfrm.state_bydst, net);
		ptrs->bysrc = xfrm_state_deref_check(net->xfrm.state_bysrc, net);
		ptrs->byspi = xfrm_state_deref_check(net->xfrm.state_byspi, net);
		ptrs->hmask = net->xfrm.state_hmask;
	} while (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence));
}
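
/* xfrm_hash_ptrs_get() pairs with the write_seqcount_begin()/end() section
 * in xfrm_hash_resize(): if a resize swaps the tables while a lockless
 * reader is snapshotting them, read_seqcount_retry() makes the reader redo
 * the snapshot, so bydst/bysrc/byspi and hmask are always mutually
 * consistent without taking xfrm_state_lock.
 */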

static struct xfrm_state *__xfrm_state_lookup_all(const struct xfrm_hash_state_ptrs *state_ptrs,
						  u32 mark,
						  const xfrm_address_t *daddr,
						  __be32 spi, u8 proto,
						  unsigned short family,
						  struct xfrm_dev_offload *xdo)
{
	unsigned int h = __xfrm_spi_hash(daddr, spi, proto, family, state_ptrs->hmask);
	struct xfrm_state *x;

	hlist_for_each_entry_rcu(x, state_ptrs->byspi + h, byspi) {
#ifdef CONFIG_XFRM_OFFLOAD
		if (xdo->type == XFRM_DEV_OFFLOAD_PACKET) {
			if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
				/* HW states are in the head of list, there is
				 * no need to iterate further.
				 */
				break;

			/* Packet offload: both policy and SA should
			 * have same device.
			 */
			if (xdo->dev != x->xso.dev)
				continue;
		} else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
			/* Skip HW policy for SW lookups */
			continue;
#endif
		if (x->props.family != family ||
		    x->id.spi != spi ||
		    x->id.proto != proto ||
		    !xfrm_addr_equal(&x->id.daddr, daddr, family))
			continue;

		if ((mark & x->mark.m) != x->mark.v)
			continue;
		if (!xfrm_state_hold_rcu(x))
			continue;
		return x;
	}

	return NULL;
}

static struct xfrm_state *__xfrm_state_lookup(const struct xfrm_hash_state_ptrs *state_ptrs,
					      u32 mark,
					      const xfrm_address_t *daddr,
					      __be32 spi, u8 proto,
					      unsigned short family)
{
	unsigned int h = __xfrm_spi_hash(daddr, spi, proto, family, state_ptrs->hmask);
	struct xfrm_state *x;

	hlist_for_each_entry_rcu(x, state_ptrs->byspi + h, byspi) {
		if (x->props.family != family ||
		    x->id.spi != spi ||
		    x->id.proto != proto ||
		    !xfrm_addr_equal(&x->id.daddr, daddr, family))
			continue;

		if ((mark & x->mark.m) != x->mark.v)
			continue;
		if (!xfrm_state_hold_rcu(x))
			continue;
		return x;
	}

	return NULL;
}

struct xfrm_state *xfrm_input_state_lookup(struct net *net, u32 mark,
					   const xfrm_address_t *daddr,
					   __be32 spi, u8 proto,
					   unsigned short family)
{
	struct xfrm_hash_state_ptrs state_ptrs;
	struct hlist_head *state_cache_input;
	struct xfrm_state *x = NULL;

	state_cache_input = raw_cpu_ptr(net->xfrm.state_cache_input);

	rcu_read_lock();
	hlist_for_each_entry_rcu(x, state_cache_input, state_cache_input) {
		if (x->props.family != family ||
		    x->id.spi != spi ||
		    x->id.proto != proto ||
		    !xfrm_addr_equal(&x->id.daddr, daddr, family))
			continue;

		if ((mark & x->mark.m) != x->mark.v)
			continue;
		if (!xfrm_state_hold_rcu(x))
			continue;
		goto out;
	}

	xfrm_hash_ptrs_get(net, &state_ptrs);

	x = __xfrm_state_lookup(&state_ptrs, mark, daddr, spi, proto, family);

	if (x && x->km.state == XFRM_STATE_VALID) {
		spin_lock_bh(&net->xfrm.xfrm_state_lock);
		if (hlist_unhashed(&x->state_cache_input)) {
			hlist_add_head_rcu(&x->state_cache_input, state_cache_input);
		} else {
			hlist_del_rcu(&x->state_cache_input);
			hlist_add_head_rcu(&x->state_cache_input, state_cache_input);
		}
		spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	}

out:
	rcu_read_unlock();
	return x;
}
EXPORT_SYMBOL(xfrm_input_state_lookup);
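
/* The per-CPU state_cache_input used above is a move-to-front cache: a hit
 * is served without touching the main SPI hash, and after a successful
 * slow-path lookup the state is (re)linked at the head of this CPU's list
 * so the next packet of the same flow hits immediately.  The entry is
 * unlinked again in __xfrm_state_delete() via the state_cache_input node.
 */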

static struct xfrm_state *__xfrm_state_lookup_byaddr(const struct xfrm_hash_state_ptrs *state_ptrs,
						     u32 mark,
						     const xfrm_address_t *daddr,
						     const xfrm_address_t *saddr,
						     u8 proto, unsigned short family)
{
	unsigned int h = __xfrm_src_hash(daddr, saddr, family, state_ptrs->hmask);
	struct xfrm_state *x;

	hlist_for_each_entry_rcu(x, state_ptrs->bysrc + h, bysrc) {
		if (x->props.family != family ||
		    x->id.proto != proto ||
		    !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
		    !xfrm_addr_equal(&x->props.saddr, saddr, family))
			continue;

		if ((mark & x->mark.m) != x->mark.v)
			continue;
		if (!xfrm_state_hold_rcu(x))
			continue;
		return x;
	}

	return NULL;
}

static inline struct xfrm_state *
__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
{
	struct xfrm_hash_state_ptrs state_ptrs;
	struct net *net = xs_net(x);
	u32 mark = x->mark.v & x->mark.m;

	xfrm_hash_ptrs_get(net, &state_ptrs);

	if (use_spi)
		return __xfrm_state_lookup(&state_ptrs, mark, &x->id.daddr,
					   x->id.spi, x->id.proto, family);
	else
		return __xfrm_state_lookup_byaddr(&state_ptrs, mark,
						  &x->id.daddr,
						  &x->props.saddr,
						  x->id.proto, family);
}

static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
{
	if (have_hash_collision &&
	    (net->xfrm.state_hmask + 1) < xfrm_state_hashmax &&
	    net->xfrm.state_num > net->xfrm.state_hmask)
		schedule_work(&net->xfrm.state_hash_work);
}
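
/* Growth heuristic, with numbers: for a 1024-bucket table (state_hmask ==
 * 1023), the resize work is scheduled once an insert collides while
 * state_num exceeds 1023, i.e. at a load factor of roughly 1.0, and
 * xfrm_hash_resize() then doubles the table to 2048 buckets, bounded
 * above by xfrm_state_hashmax (1M buckets).
 */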

static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
			       const struct flowi *fl, unsigned short family,
			       struct xfrm_state **best, int *acq_in_progress,
			       int *error, unsigned int pcpu_id)
{
	/* Resolution logic:
	 * 1. There is a valid state with matching selector. Done.
	 * 2. Valid state with inappropriate selector. Skip.
	 *
	 * Entering area of "sysdeps".
	 *
	 * 3. If state is not valid, selector is temporary, it selects
	 *    only session which triggered previous resolution. Key
	 *    manager will do something to install a state with proper
	 *    selector.
	 */
	if (x->km.state == XFRM_STATE_VALID) {
		if ((x->sel.family &&
		     (x->sel.family != family ||
		      !xfrm_selector_match(&x->sel, fl, family))) ||
		    !security_xfrm_state_pol_flow_match(x, pol,
							&fl->u.__fl_common))
			return;

		if (x->pcpu_num != UINT_MAX && x->pcpu_num != pcpu_id)
			return;

		if (!*best ||
		    ((*best)->pcpu_num == UINT_MAX && x->pcpu_num == pcpu_id) ||
		    (*best)->km.dying > x->km.dying ||
		    ((*best)->km.dying == x->km.dying &&
		     (*best)->curlft.add_time < x->curlft.add_time))
			*best = x;
	} else if (x->km.state == XFRM_STATE_ACQ) {
		if (!*best || x->pcpu_num == pcpu_id)
			*acq_in_progress = 1;
	} else if (x->km.state == XFRM_STATE_ERROR ||
		   x->km.state == XFRM_STATE_EXPIRED) {
		if ((!x->sel.family ||
		     (x->sel.family == family &&
		      xfrm_selector_match(&x->sel, fl, family))) &&
		    security_xfrm_state_pol_flow_match(x, pol,
						       &fl->u.__fl_common))
			*error = -ESRCH;
	}
}

struct xfrm_state *
xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
		const struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family, u32 if_id)
{
	static xfrm_address_t saddr_wildcard = { };
	struct xfrm_hash_state_ptrs state_ptrs;
	struct net *net = xp_net(pol);
	unsigned int h, h_wildcard;
	struct xfrm_state *x, *x0, *to_put;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;
	u32 mark = pol->mark.v & pol->mark.m;
	unsigned short encap_family = tmpl->encap_family;
	unsigned int sequence;
	struct km_event c;
	unsigned int pcpu_id;
	bool cached = false;

	/* We need the cpu id just as a lookup key,
	 * we don't require it to be stable.
	 */
	pcpu_id = raw_smp_processor_id();

	to_put = NULL;

	sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);

	rcu_read_lock();
	xfrm_hash_ptrs_get(net, &state_ptrs);

	hlist_for_each_entry_rcu(x, &pol->state_cache_list, state_cache) {
		if (x->props.family == encap_family &&
		    x->props.reqid == tmpl->reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    x->if_id == if_id &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
			xfrm_state_look_at(pol, x, fl, encap_family,
					   &best, &acquire_in_progress, &error, pcpu_id);
	}

	if (best)
		goto cached;

	hlist_for_each_entry_rcu(x, &pol->state_cache_list, state_cache) {
		if (x->props.family == encap_family &&
		    x->props.reqid == tmpl->reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    x->if_id == if_id &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
			xfrm_state_look_at(pol, x, fl, family,
					   &best, &acquire_in_progress, &error, pcpu_id);
	}

cached:
	cached = true;
	if (best)
		goto found;
	else if (error)
		best = NULL;
	else if (acquire_in_progress) /* XXX: acquire_in_progress should not happen */
		WARN_ON(1);

	h = __xfrm_dst_hash(daddr, saddr, tmpl->reqid, encap_family, state_ptrs.hmask);
	hlist_for_each_entry_rcu(x, state_ptrs.bydst + h, bydst) {
#ifdef CONFIG_XFRM_OFFLOAD
		if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
			if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
				/* HW states are in the head of list, there is
				 * no need to iterate further.
				 */
				break;

			/* Packet offload: both policy and SA should
			 * have same device.
			 */
			if (pol->xdo.dev != x->xso.dev)
				continue;
		} else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
			/* Skip HW policy for SW lookups */
			continue;
#endif
		if (x->props.family == encap_family &&
		    x->props.reqid == tmpl->reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    x->if_id == if_id &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
			xfrm_state_look_at(pol, x, fl, family,
					   &best, &acquire_in_progress, &error, pcpu_id);
	}
	if (best || acquire_in_progress)
		goto found;

	h_wildcard = __xfrm_dst_hash(daddr, &saddr_wildcard, tmpl->reqid,
				     encap_family, state_ptrs.hmask);
	hlist_for_each_entry_rcu(x, state_ptrs.bydst + h_wildcard, bydst) {
#ifdef CONFIG_XFRM_OFFLOAD
		if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
			if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
				/* HW states are in the head of list, there is
				 * no need to iterate further.
				 */
				break;

			/* Packet offload: both policy and SA should
			 * have same device.
			 */
			if (pol->xdo.dev != x->xso.dev)
				continue;
		} else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
			/* Skip HW policy for SW lookups */
			continue;
#endif
		if (x->props.family == encap_family &&
		    x->props.reqid == tmpl->reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    x->if_id == if_id &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
			xfrm_state_look_at(pol, x, fl, family,
					   &best, &acquire_in_progress, &error, pcpu_id);
	}

found:
	if (!(pol->flags & XFRM_POLICY_CPU_ACQUIRE) ||
	    (best && (best->pcpu_num == pcpu_id)))
		x = best;

	if (!x && !error && !acquire_in_progress) {
		if (tmpl->id.spi &&
		    (x0 = __xfrm_state_lookup_all(&state_ptrs, mark, daddr,
						  tmpl->id.spi, tmpl->id.proto,
						  encap_family,
						  &pol->xdo)) != NULL) {
			to_put = x0;
			error = -EEXIST;
			goto out;
		}

		c.net = net;
		/* If the KMs have no listeners (yet...), avoid allocating an SA
		 * for each and every packet - garbage collection might not
		 * handle the flood.
		 */
		if (!km_is_alive(&c)) {
			error = -ESRCH;
			goto out;
		}

		x = xfrm_state_alloc(net);
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize temporary state matching only
		 * to current session. */
		xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
		memcpy(&x->mark, &pol->mark, sizeof(x->mark));
		x->if_id = if_id;
		if ((pol->flags & XFRM_POLICY_CPU_ACQUIRE) && best)
			x->pcpu_num = pcpu_id;

		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->flowi_secid);
		if (error) {
			x->km.state = XFRM_STATE_DEAD;
			to_put = x;
			x = NULL;
			goto out;
		}
#ifdef CONFIG_XFRM_OFFLOAD
		if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
			struct xfrm_dev_offload *xdo = &pol->xdo;
			struct xfrm_dev_offload *xso = &x->xso;
			struct net_device *dev = xdo->dev;

			xso->type = XFRM_DEV_OFFLOAD_PACKET;
			xso->dir = xdo->dir;
			xso->dev = dev;
			xso->flags = XFRM_DEV_OFFLOAD_FLAG_ACQ;
			netdev_hold(dev, &xso->dev_tracker, GFP_ATOMIC);
			error = dev->xfrmdev_ops->xdo_dev_state_add(dev, x,
								    NULL);
			if (error) {
				xso->dir = 0;
				netdev_put(dev, &xso->dev_tracker);
				xso->dev = NULL;
				xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
				x->km.state = XFRM_STATE_DEAD;
				to_put = x;
				x = NULL;
				goto out;
			}
		}
#endif
		if (km_query(x, tmpl, pol) == 0) {
			spin_lock_bh(&net->xfrm.xfrm_state_lock);
			x->km.state = XFRM_STATE_ACQ;
			x->dir = XFRM_SA_DIR_OUT;
			list_add(&x->km.all, &net->xfrm.state_all);
			h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
			XFRM_STATE_INSERT(bydst, &x->bydst,
					  net->xfrm.state_bydst + h,
					  x->xso.type);
			h = xfrm_src_hash(net, daddr, saddr, encap_family);
			XFRM_STATE_INSERT(bysrc, &x->bysrc,
					  net->xfrm.state_bysrc + h,
					  x->xso.type);
			INIT_HLIST_NODE(&x->state_cache);
			if (x->id.spi) {
				h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
				XFRM_STATE_INSERT(byspi, &x->byspi,
						  net->xfrm.state_byspi + h,
						  x->xso.type);
			}
			if (x->km.seq) {
				h = xfrm_seq_hash(net, x->km.seq);
				XFRM_STATE_INSERT(byseq, &x->byseq,
						  net->xfrm.state_byseq + h,
						  x->xso.type);
			}
			x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
			hrtimer_start(&x->mtimer,
				      ktime_set(net->xfrm.sysctl_acq_expires, 0),
				      HRTIMER_MODE_REL_SOFT);
			net->xfrm.state_num++;
			xfrm_hash_grow_check(net, x->bydst.next != NULL);
			spin_unlock_bh(&net->xfrm.xfrm_state_lock);
		} else {
#ifdef CONFIG_XFRM_OFFLOAD
			struct xfrm_dev_offload *xso = &x->xso;

			if (xso->type == XFRM_DEV_OFFLOAD_PACKET) {
				xfrm_dev_state_delete(x);
				xfrm_dev_state_free(x);
			}
#endif
			x->km.state = XFRM_STATE_DEAD;
			to_put = x;
			x = NULL;
			error = -ESRCH;
		}

		/* Use the already installed 'fallback' while the CPU-specific
		 * SA acquire is handled */
		if (best)
			x = best;
	}
out:
	if (x) {
		if (!xfrm_state_hold_rcu(x)) {
			*err = -EAGAIN;
			x = NULL;
		}
	} else {
		*err = acquire_in_progress ? -EAGAIN : error;
	}

	if (x && x->km.state == XFRM_STATE_VALID && !cached &&
	    (!(pol->flags & XFRM_POLICY_CPU_ACQUIRE) || x->pcpu_num == pcpu_id)) {
		spin_lock_bh(&net->xfrm.xfrm_state_lock);
		if (hlist_unhashed(&x->state_cache))
			hlist_add_head_rcu(&x->state_cache, &pol->state_cache_list);
		spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	}

	rcu_read_unlock();
	if (to_put)
		xfrm_state_put(to_put);

	if (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence)) {
		*err = -EAGAIN;
		if (x) {
			xfrm_state_put(x);
			x = NULL;
		}
	}

	return x;
}
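
/* Summary of the acquire path in xfrm_state_find() above: when nothing
 * matches, a larval state is allocated, km_query() asks the key managers
 * to negotiate a real SA, and the larval entry is hashed in state
 * XFRM_STATE_ACQ with a hard add-expiry of net->xfrm.sysctl_acq_expires
 * seconds, so unanswered acquires clean themselves up through the normal
 * timer path.
 */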

struct xfrm_state *
xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
		    xfrm_address_t *daddr, xfrm_address_t *saddr,
		    unsigned short family, u8 mode, u8 proto, u32 reqid)
{
	unsigned int h;
	struct xfrm_state *rx = NULL, *x = NULL;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
	hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    x->if_id == if_id &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    mode == x->props.mode &&
		    proto == x->id.proto &&
		    x->km.state == XFRM_STATE_VALID) {
			rx = x;
			break;
		}
	}

	if (rx)
		xfrm_state_hold(rx);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	return rx;
}
EXPORT_SYMBOL(xfrm_stateonly_find);

struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
					   unsigned short family)
{
	struct xfrm_state *x;
	struct xfrm_state_walk *w;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	list_for_each_entry(w, &net->xfrm.state_all, all) {
		x = container_of(w, struct xfrm_state, km);
		if (x->props.family != family ||
		    x->id.spi != spi)
			continue;

		xfrm_state_hold(x);
		spin_unlock_bh(&net->xfrm.xfrm_state_lock);
		return x;
	}
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	return NULL;
}
EXPORT_SYMBOL(xfrm_state_lookup_byspi);

static struct xfrm_state *xfrm_state_lookup_spi_proto(struct net *net, __be32 spi, u8 proto)
{
	struct xfrm_state *x;
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i <= net->xfrm.state_hmask; i++) {
		hlist_for_each_entry_rcu(x, &net->xfrm.state_byspi[i], byspi) {
			if (x->id.spi == spi && x->id.proto == proto) {
				if (!xfrm_state_hold_rcu(x))
					continue;
				rcu_read_unlock();
				return x;
			}
		}
	}
	rcu_read_unlock();
	return NULL;
}

static void __xfrm_state_insert(struct xfrm_state *x)
{
	struct net *net = xs_net(x);
	unsigned int h;

	list_add(&x->km.all, &net->xfrm.state_all);

	/* Sanitize mark before store */
	x->mark.v &= x->mark.m;

	h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
			  x->props.reqid, x->props.family);
	XFRM_STATE_INSERT(bydst, &x->bydst, net->xfrm.state_bydst + h,
			  x->xso.type);

	h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family);
	XFRM_STATE_INSERT(bysrc, &x->bysrc, net->xfrm.state_bysrc + h,
			  x->xso.type);

	if (x->id.spi) {
		h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto,
				  x->props.family);

		XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h,
				  x->xso.type);
	}

	if (x->km.seq) {
		h = xfrm_seq_hash(net, x->km.seq);

		XFRM_STATE_INSERT(byseq, &x->byseq, net->xfrm.state_byseq + h,
				  x->xso.type);
	}

	hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
	if (x->replay_maxage)
		mod_timer(&x->rtimer, jiffies + x->replay_maxage);

	net->xfrm.state_num++;

	xfrm_hash_grow_check(net, x->bydst.next != NULL);
	xfrm_nat_keepalive_state_updated(x);
}

/* net->xfrm.xfrm_state_lock is held */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
	struct net *net = xs_net(xnew);
	unsigned short family = xnew->props.family;
	u32 reqid = xnew->props.reqid;
	struct xfrm_state *x;
	unsigned int h;
	u32 mark = xnew->mark.v & xnew->mark.m;
	u32 if_id = xnew->if_id;
	u32 cpu_id = xnew->pcpu_num;

	h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
	hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == reqid &&
		    x->if_id == if_id &&
		    x->pcpu_num == cpu_id &&
		    (mark & x->mark.m) == x->mark.v &&
		    xfrm_addr_equal(&x->id.daddr, &xnew->id.daddr, family) &&
		    xfrm_addr_equal(&x->props.saddr, &xnew->props.saddr, family))
			x->genid++;
	}
}

void xfrm_state_insert(struct xfrm_state *x)
{
	struct net *net = xs_net(x);

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_insert);
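
/* __xfrm_state_bump_genids() increments x->genid on every state sharing
 * the (family, reqid, daddr, saddr, mark, if_id, pcpu_num) tuple with the
 * newly inserted one.  Cached bundles compare the genid they captured
 * against the state's current one and are rebuilt on mismatch, so a newly
 * installed SA takes effect without an explicit flush of the old ones.
 */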

/* net->xfrm.xfrm_state_lock is held */
static struct xfrm_state *__find_acq_core(struct net *net,
					  const struct xfrm_mark *m,
					  unsigned short family, u8 mode,
					  u32 reqid, u32 if_id, u32 pcpu_num, u8 proto,
					  const xfrm_address_t *daddr,
					  const xfrm_address_t *saddr,
					  int create)
{
	unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
	struct xfrm_state *x;
	u32 mark = m->v & m->m;

	hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
		if (x->props.reqid != reqid ||
		    x->props.mode != mode ||
		    x->props.family != family ||
		    x->km.state != XFRM_STATE_ACQ ||
		    x->id.spi != 0 ||
		    x->id.proto != proto ||
		    (mark & x->mark.m) != x->mark.v ||
		    x->pcpu_num != pcpu_num ||
		    !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
		    !xfrm_addr_equal(&x->props.saddr, saddr, family))
			continue;

		xfrm_state_hold(x);
		return x;
	}

	if (!create)
		return NULL;

	x = xfrm_state_alloc(net);
	if (likely(x)) {
		switch (family) {
		case AF_INET:
			x->sel.daddr.a4 = daddr->a4;
			x->sel.saddr.a4 = saddr->a4;
			x->sel.prefixlen_d = 32;
			x->sel.prefixlen_s = 32;
			x->props.saddr.a4 = saddr->a4;
			x->id.daddr.a4 = daddr->a4;
			break;

		case AF_INET6:
			x->sel.daddr.in6 = daddr->in6;
			x->sel.saddr.in6 = saddr->in6;
			x->sel.prefixlen_d = 128;
			x->sel.prefixlen_s = 128;
			x->props.saddr.in6 = saddr->in6;
			x->id.daddr.in6 = daddr->in6;
			break;
		}

		x->pcpu_num = pcpu_num;
		x->km.state = XFRM_STATE_ACQ;
		x->id.proto = proto;
		x->props.family = family;
		x->props.mode = mode;
		x->props.reqid = reqid;
		x->if_id = if_id;
		x->mark.v = m->v;
		x->mark.m = m->m;
		x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
		xfrm_state_hold(x);
		hrtimer_start(&x->mtimer,
			      ktime_set(net->xfrm.sysctl_acq_expires, 0),
			      HRTIMER_MODE_REL_SOFT);
		list_add(&x->km.all, &net->xfrm.state_all);
		XFRM_STATE_INSERT(bydst, &x->bydst, net->xfrm.state_bydst + h,
				  x->xso.type);
		h = xfrm_src_hash(net, daddr, saddr, family);
		XFRM_STATE_INSERT(bysrc, &x->bysrc, net->xfrm.state_bysrc + h,
				  x->xso.type);

		net->xfrm.state_num++;

		xfrm_hash_grow_check(net, x->bydst.next != NULL);
	}

	return x;
}

static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num);

int xfrm_state_add(struct xfrm_state *x)
{
	struct net *net = xs_net(x);
	struct xfrm_state *x1, *to_put;
	int family;
	int err;
	u32 mark = x->mark.v & x->mark.m;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	family = x->props.family;

	to_put = NULL;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);

	x1 = __xfrm_state_locate(x, use_spi, family);
	if (x1) {
		to_put = x1;
		x1 = NULL;
		err = -EEXIST;
		goto out;
	}

	if (use_spi && x->km.seq) {
		x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq, x->pcpu_num);
		if (x1 && ((x1->id.proto != x->id.proto) ||
		    !xfrm_addr_equal(&x1->id.daddr, &x->id.daddr, family))) {
			to_put = x1;
			x1 = NULL;
		}
	}

	if (use_spi && !x1)
		x1 = __find_acq_core(net, &x->mark, family, x->props.mode,
				     x->props.reqid, x->if_id, x->pcpu_num, x->id.proto,
				     &x->id.daddr, &x->props.saddr, 0);

	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	err = 0;

out:
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	if (x1) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
	}

	if (to_put)
		xfrm_state_put(to_put);

	return err;
}
EXPORT_SYMBOL(xfrm_state_add);
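
/* xfrm_state_add() contract: -EEXIST if an SA with the same
 * (daddr, proto, spi) triplet (or the same address pair, for SPI-less
 * protocols) is already installed; otherwise a matching larval ACQ state,
 * found by km.seq or by __find_acq_core(), is deleted once the new state
 * has been inserted, so the acquire is replaced by the negotiated SA.
 */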

#ifdef CONFIG_XFRM_MIGRATE
static inline int clone_security(struct xfrm_state *x, struct xfrm_sec_ctx *security)
{
	struct xfrm_user_sec_ctx *uctx;
	int size = sizeof(*uctx) + security->ctx_len;
	int err;

	uctx = kmalloc(size, GFP_KERNEL);
	if (!uctx)
		return -ENOMEM;

	uctx->exttype = XFRMA_SEC_CTX;
	uctx->len = size;
	uctx->ctx_doi = security->ctx_doi;
	uctx->ctx_alg = security->ctx_alg;
	uctx->ctx_len = security->ctx_len;
	memcpy(uctx + 1, security->ctx_str, security->ctx_len);
	err = security_xfrm_state_alloc(x, uctx);
	kfree(uctx);
	if (err)
		return err;

	return 0;
}

static struct xfrm_state *xfrm_state_clone_and_setup(struct xfrm_state *orig,
						     struct xfrm_encap_tmpl *encap,
						     struct xfrm_migrate *m)
{
	struct net *net = xs_net(orig);
	struct xfrm_state *x = xfrm_state_alloc(net);
	if (!x)
		goto out;

	memcpy(&x->id, &orig->id, sizeof(x->id));
	memcpy(&x->sel, &orig->sel, sizeof(x->sel));
	memcpy(&x->lft, &orig->lft, sizeof(x->lft));
	x->props.mode = orig->props.mode;
	x->props.replay_window = orig->props.replay_window;
	x->props.reqid = orig->props.reqid;
	x->props.family = orig->props.family;
	x->props.saddr = orig->props.saddr;

	if (orig->aalg) {
		x->aalg = xfrm_algo_auth_clone(orig->aalg);
		if (!x->aalg)
			goto error;
	}
	x->props.aalgo = orig->props.aalgo;

	if (orig->aead) {
		x->aead = xfrm_algo_aead_clone(orig->aead);
		x->geniv = orig->geniv;
		if (!x->aead)
			goto error;
	}
	if (orig->ealg) {
		x->ealg = xfrm_algo_clone(orig->ealg);
		if (!x->ealg)
			goto error;
	}
	x->props.ealgo = orig->props.ealgo;

	if (orig->calg) {
		x->calg = xfrm_algo_clone(orig->calg);
		if (!x->calg)
			goto error;
	}
	x->props.calgo = orig->props.calgo;

	if (encap || orig->encap) {
		if (encap)
			x->encap = kmemdup(encap, sizeof(*x->encap),
					   GFP_KERNEL);
		else
			x->encap = kmemdup(orig->encap, sizeof(*x->encap),
					   GFP_KERNEL);

		if (!x->encap)
			goto error;
	}

	if (orig->security)
		if (clone_security(x, orig->security))
			goto error;

	if (orig->coaddr) {
		x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
				    GFP_KERNEL);
		if (!x->coaddr)
			goto error;
	}

	if (orig->replay_esn) {
		if (xfrm_replay_clone(x, orig))
			goto error;
	}

	memcpy(&x->mark, &orig->mark, sizeof(x->mark));
	memcpy(&x->props.smark, &orig->props.smark, sizeof(x->props.smark));

	x->props.flags = orig->props.flags;
	x->props.extra_flags = orig->props.extra_flags;

	x->pcpu_num = orig->pcpu_num;
	x->if_id = orig->if_id;
	x->tfcpad = orig->tfcpad;
	x->replay_maxdiff = orig->replay_maxdiff;
	x->replay_maxage = orig->replay_maxage;
	memcpy(&x->curlft, &orig->curlft, sizeof(x->curlft));
	x->km.state = orig->km.state;
	x->km.seq = orig->km.seq;
	x->replay = orig->replay;
	x->preplay = orig->preplay;
	x->mapping_maxage = orig->mapping_maxage;
	x->lastused = orig->lastused;
	x->new_mapping = 0;
	x->new_mapping_sport = 0;
	x->dir = orig->dir;

	x->mode_cbs = orig->mode_cbs;
	if (x->mode_cbs && x->mode_cbs->clone_state) {
		if (x->mode_cbs->clone_state(x, orig))
			goto error;
	}

	x->props.family = m->new_family;
	memcpy(&x->id.daddr, &m->new_daddr, sizeof(x->id.daddr));
	memcpy(&x->props.saddr, &m->new_saddr, sizeof(x->props.saddr));

	return x;

error:
	xfrm_state_put(x);
out:
	return NULL;
}
2092 		hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
2093 			if (x->props.mode != m->mode ||
2094 			    x->id.proto != m->proto)
2095 				continue;
2096 			if (m->reqid && x->props.reqid != m->reqid)
2097 				continue;
2098 			if (if_id != 0 && x->if_id != if_id)
2099 				continue;
2100 			if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
2101 					     m->old_family) ||
2102 			    !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
2103 					     m->old_family))
2104 				continue;
2105 			xfrm_state_hold(x);
2106 			break;
2107 		}
2108 	} else {
2109 		h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr,
2110 				  m->old_family);
2111 		hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) {
2112 			if (x->props.mode != m->mode ||
2113 			    x->id.proto != m->proto)
2114 				continue;
2115 			if (if_id != 0 && x->if_id != if_id)
2116 				continue;
2117 			if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
2118 					     m->old_family) ||
2119 			    !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
2120 					     m->old_family))
2121 				continue;
2122 			xfrm_state_hold(x);
2123 			break;
2124 		}
2125 	}
2126 
2127 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
2128 
2129 	return x;
2130 }
2131 EXPORT_SYMBOL(xfrm_migrate_state_find);
2132 
2133 struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
2134 				      struct xfrm_migrate *m,
2135 				      struct xfrm_encap_tmpl *encap,
2136 				      struct net *net,
2137 				      struct xfrm_user_offload *xuo,
2138 				      struct netlink_ext_ack *extack)
2139 {
2140 	struct xfrm_state *xc;
2141 
2142 	xc = xfrm_state_clone_and_setup(x, encap, m);
2143 	if (!xc)
2144 		return NULL;
2145 
2146 	if (xfrm_init_state(xc) < 0)
2147 		goto error;
2148 
2149 	/* configure the hardware if offload is requested */
2150 	if (xuo && xfrm_dev_state_add(net, xc, xuo, extack))
2151 		goto error;
2152 
2153 	/* add state */
2154 	if (xfrm_addr_equal(&x->id.daddr, &m->new_daddr, m->new_family)) {
2155 		/* care is needed when the destination address of the
2156 		 * state is updated, since it is part of the lookup triplet */
2157 		xfrm_state_insert(xc);
2158 	} else {
2159 		if (xfrm_state_add(xc) < 0)
2160 			goto error;
2161 	}
2162 
2163 	return xc;
2164 error:
2165 	xfrm_state_put(xc);
2166 	return NULL;
2167 }
2168 EXPORT_SYMBOL(xfrm_state_migrate);
2169 #endif
2170 
2171 int xfrm_state_update(struct xfrm_state *x)
2172 {
2173 	struct xfrm_state *x1, *to_put;
2174 	int err;
2175 	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
2176 	struct net *net = xs_net(x);
2177 
2178 	to_put = NULL;
2179 
2180 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
2181 	x1 = __xfrm_state_locate(x, use_spi, x->props.family);
2182 
2183 	err = -ESRCH;
2184 	if (!x1)
2185 		goto out;
2186 
2187 	if (xfrm_state_kern(x1)) {
2188 		to_put = x1;
2189 		err = -EEXIST;
2190 		goto out;
2191 	}
2192 
2193 	if (x1->km.state == XFRM_STATE_ACQ) {
2194 		if (x->dir && x1->dir != x->dir)
2195 			goto out;
2196 
2197 		__xfrm_state_insert(x);
2198 		x = NULL;
2199 	} else {
2200 		if (x1->dir != x->dir)
2201 			goto out;
2202 	}
2203 	err = 0;
2204 
2205 out:
2206 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
2207 
2208 	if (to_put)
2209 		xfrm_state_put(to_put);
2210 
2211 	if (err)
2212 		return err;
2213 
2214 	if (!x) {
2215 		xfrm_state_delete(x1);
2216 		xfrm_state_put(x1);
2217 		return 0;
2218 	}
2219 
2220 	err = -EINVAL;
2221 	spin_lock_bh(&x1->lock);
2222 	if (likely(x1->km.state == XFRM_STATE_VALID)) {
2223 		if (x->encap && x1->encap &&
2224 		    x->encap->encap_type == x1->encap->encap_type)
2225 			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
2226 		else if (x->encap || x1->encap)
2227 			goto fail;
2228 
2229 		if (x->coaddr && x1->coaddr) {
2230 			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
2231 		}
2232 		if (!use_spi && memcmp(&x1->sel, &x->sel,
sizeof(x1->sel))) 2233 memcpy(&x1->sel, &x->sel, sizeof(x1->sel)); 2234 memcpy(&x1->lft, &x->lft, sizeof(x1->lft)); 2235 x1->km.dying = 0; 2236 2237 hrtimer_start(&x1->mtimer, ktime_set(1, 0), 2238 HRTIMER_MODE_REL_SOFT); 2239 if (READ_ONCE(x1->curlft.use_time)) 2240 xfrm_state_check_expire(x1); 2241 2242 if (x->props.smark.m || x->props.smark.v || x->if_id) { 2243 spin_lock_bh(&net->xfrm.xfrm_state_lock); 2244 2245 if (x->props.smark.m || x->props.smark.v) 2246 x1->props.smark = x->props.smark; 2247 2248 if (x->if_id) 2249 x1->if_id = x->if_id; 2250 2251 __xfrm_state_bump_genids(x1); 2252 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 2253 } 2254 2255 err = 0; 2256 x->km.state = XFRM_STATE_DEAD; 2257 __xfrm_state_put(x); 2258 } 2259 2260 fail: 2261 spin_unlock_bh(&x1->lock); 2262 2263 xfrm_state_put(x1); 2264 2265 return err; 2266 } 2267 EXPORT_SYMBOL(xfrm_state_update); 2268 2269 int xfrm_state_check_expire(struct xfrm_state *x) 2270 { 2271 /* All counters which are needed to decide if state is expired 2272 * are handled by SW for non-packet offload modes. Simply skip 2273 * the following update and save extra boilerplate in drivers. 2274 */ 2275 if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) 2276 xfrm_dev_state_update_stats(x); 2277 2278 if (!READ_ONCE(x->curlft.use_time)) 2279 WRITE_ONCE(x->curlft.use_time, ktime_get_real_seconds()); 2280 2281 if (x->curlft.bytes >= x->lft.hard_byte_limit || 2282 x->curlft.packets >= x->lft.hard_packet_limit) { 2283 x->km.state = XFRM_STATE_EXPIRED; 2284 hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL_SOFT); 2285 return -EINVAL; 2286 } 2287 2288 if (!x->km.dying && 2289 (x->curlft.bytes >= x->lft.soft_byte_limit || 2290 x->curlft.packets >= x->lft.soft_packet_limit)) { 2291 x->km.dying = 1; 2292 km_state_expired(x, 0, 0); 2293 } 2294 return 0; 2295 } 2296 EXPORT_SYMBOL(xfrm_state_check_expire); 2297 2298 void xfrm_state_update_stats(struct net *net) 2299 { 2300 struct xfrm_state *x; 2301 int i; 2302 2303 spin_lock_bh(&net->xfrm.xfrm_state_lock); 2304 for (i = 0; i <= net->xfrm.state_hmask; i++) { 2305 hlist_for_each_entry(x, net->xfrm.state_bydst + i, bydst) 2306 xfrm_dev_state_update_stats(x); 2307 } 2308 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 2309 } 2310 2311 struct xfrm_state * 2312 xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi, 2313 u8 proto, unsigned short family) 2314 { 2315 struct xfrm_hash_state_ptrs state_ptrs; 2316 struct xfrm_state *x; 2317 2318 rcu_read_lock(); 2319 xfrm_hash_ptrs_get(net, &state_ptrs); 2320 2321 x = __xfrm_state_lookup(&state_ptrs, mark, daddr, spi, proto, family); 2322 rcu_read_unlock(); 2323 return x; 2324 } 2325 EXPORT_SYMBOL(xfrm_state_lookup); 2326 2327 struct xfrm_state * 2328 xfrm_state_lookup_byaddr(struct net *net, u32 mark, 2329 const xfrm_address_t *daddr, const xfrm_address_t *saddr, 2330 u8 proto, unsigned short family) 2331 { 2332 struct xfrm_hash_state_ptrs state_ptrs; 2333 struct xfrm_state *x; 2334 2335 rcu_read_lock(); 2336 2337 xfrm_hash_ptrs_get(net, &state_ptrs); 2338 2339 x = __xfrm_state_lookup_byaddr(&state_ptrs, mark, daddr, saddr, proto, family); 2340 rcu_read_unlock(); 2341 return x; 2342 } 2343 EXPORT_SYMBOL(xfrm_state_lookup_byaddr); 2344 2345 struct xfrm_state * 2346 xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid, 2347 u32 if_id, u32 pcpu_num, u8 proto, const xfrm_address_t *daddr, 2348 const xfrm_address_t *saddr, int create, unsigned short family) 2349 { 2350 struct xfrm_state *x; 2351 2352 
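	/* __find_acq_core() below runs under xfrm_state_lock and, when
	 * create is non-zero, sets up a larval XFRM_STATE_ACQ entry bounded
	 * by sysctl_acq_expires if nothing matches.  A hypothetical call
	 * resolving an ESP tunnel template (illustrative values only):
	 *
	 *	struct xfrm_mark mark = { .v = 0, .m = 0 };
	 *
	 *	x = xfrm_find_acq(net, &mark, XFRM_MODE_TUNNEL, 1, 0, 0,
	 *			  IPPROTO_ESP, &daddr, &saddr, 1, AF_INET);
	 */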
spin_lock_bh(&net->xfrm.xfrm_state_lock); 2353 x = __find_acq_core(net, mark, family, mode, reqid, if_id, pcpu_num, 2354 proto, daddr, saddr, create); 2355 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 2356 2357 return x; 2358 } 2359 EXPORT_SYMBOL(xfrm_find_acq); 2360 2361 #ifdef CONFIG_XFRM_SUB_POLICY 2362 #if IS_ENABLED(CONFIG_IPV6) 2363 /* distribution counting sort function for xfrm_state and xfrm_tmpl */ 2364 static void 2365 __xfrm6_sort(void **dst, void **src, int n, 2366 int (*cmp)(const void *p), int maxclass) 2367 { 2368 int count[XFRM_MAX_DEPTH] = { }; 2369 int class[XFRM_MAX_DEPTH]; 2370 int i; 2371 2372 for (i = 0; i < n; i++) { 2373 int c = cmp(src[i]); 2374 2375 class[i] = c; 2376 count[c]++; 2377 } 2378 2379 for (i = 2; i < maxclass; i++) 2380 count[i] += count[i - 1]; 2381 2382 for (i = 0; i < n; i++) { 2383 dst[count[class[i] - 1]++] = src[i]; 2384 src[i] = NULL; 2385 } 2386 } 2387 2388 /* Rule for xfrm_state: 2389 * 2390 * rule 1: select IPsec transport except AH 2391 * rule 2: select MIPv6 RO or inbound trigger 2392 * rule 3: select IPsec transport AH 2393 * rule 4: select IPsec tunnel 2394 * rule 5: others 2395 */ 2396 static int __xfrm6_state_sort_cmp(const void *p) 2397 { 2398 const struct xfrm_state *v = p; 2399 2400 switch (v->props.mode) { 2401 case XFRM_MODE_TRANSPORT: 2402 if (v->id.proto != IPPROTO_AH) 2403 return 1; 2404 else 2405 return 3; 2406 #if IS_ENABLED(CONFIG_IPV6_MIP6) 2407 case XFRM_MODE_ROUTEOPTIMIZATION: 2408 case XFRM_MODE_IN_TRIGGER: 2409 return 2; 2410 #endif 2411 case XFRM_MODE_TUNNEL: 2412 case XFRM_MODE_BEET: 2413 case XFRM_MODE_IPTFS: 2414 return 4; 2415 } 2416 return 5; 2417 } 2418 2419 /* Rule for xfrm_tmpl: 2420 * 2421 * rule 1: select IPsec transport 2422 * rule 2: select MIPv6 RO or inbound trigger 2423 * rule 3: select IPsec tunnel 2424 * rule 4: others 2425 */ 2426 static int __xfrm6_tmpl_sort_cmp(const void *p) 2427 { 2428 const struct xfrm_tmpl *v = p; 2429 2430 switch (v->mode) { 2431 case XFRM_MODE_TRANSPORT: 2432 return 1; 2433 #if IS_ENABLED(CONFIG_IPV6_MIP6) 2434 case XFRM_MODE_ROUTEOPTIMIZATION: 2435 case XFRM_MODE_IN_TRIGGER: 2436 return 2; 2437 #endif 2438 case XFRM_MODE_TUNNEL: 2439 case XFRM_MODE_BEET: 2440 case XFRM_MODE_IPTFS: 2441 return 3; 2442 } 2443 return 4; 2444 } 2445 #else 2446 static inline int __xfrm6_state_sort_cmp(const void *p) { return 5; } 2447 static inline int __xfrm6_tmpl_sort_cmp(const void *p) { return 4; } 2448 2449 static inline void 2450 __xfrm6_sort(void **dst, void **src, int n, 2451 int (*cmp)(const void *p), int maxclass) 2452 { 2453 int i; 2454 2455 for (i = 0; i < n; i++) 2456 dst[i] = src[i]; 2457 } 2458 #endif /* CONFIG_IPV6 */ 2459 2460 void 2461 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n, 2462 unsigned short family) 2463 { 2464 int i; 2465 2466 if (family == AF_INET6) 2467 __xfrm6_sort((void **)dst, (void **)src, n, 2468 __xfrm6_tmpl_sort_cmp, 5); 2469 else 2470 for (i = 0; i < n; i++) 2471 dst[i] = src[i]; 2472 } 2473 2474 void 2475 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n, 2476 unsigned short family) 2477 { 2478 int i; 2479 2480 if (family == AF_INET6) 2481 __xfrm6_sort((void **)dst, (void **)src, n, 2482 __xfrm6_state_sort_cmp, 6); 2483 else 2484 for (i = 0; i < n; i++) 2485 dst[i] = src[i]; 2486 } 2487 #endif 2488 2489 /* Silly enough, but I'm lazy to build resolution list */ 2490 2491 static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num) 2492 { 2493 unsigned int h = xfrm_seq_hash(net, 
seq); 2494 struct xfrm_state *x; 2495 2496 hlist_for_each_entry_rcu(x, net->xfrm.state_byseq + h, byseq) { 2497 if (x->km.seq == seq && 2498 (mark & x->mark.m) == x->mark.v && 2499 x->pcpu_num == pcpu_num && 2500 x->km.state == XFRM_STATE_ACQ) { 2501 xfrm_state_hold(x); 2502 return x; 2503 } 2504 } 2505 2506 return NULL; 2507 } 2508 2509 struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num) 2510 { 2511 struct xfrm_state *x; 2512 2513 spin_lock_bh(&net->xfrm.xfrm_state_lock); 2514 x = __xfrm_find_acq_byseq(net, mark, seq, pcpu_num); 2515 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 2516 return x; 2517 } 2518 EXPORT_SYMBOL(xfrm_find_acq_byseq); 2519 2520 u32 xfrm_get_acqseq(void) 2521 { 2522 u32 res; 2523 static atomic_t acqseq; 2524 2525 do { 2526 res = atomic_inc_return(&acqseq); 2527 } while (!res); 2528 2529 return res; 2530 } 2531 EXPORT_SYMBOL(xfrm_get_acqseq); 2532 2533 int verify_spi_info(u8 proto, u32 min, u32 max, struct netlink_ext_ack *extack) 2534 { 2535 switch (proto) { 2536 case IPPROTO_AH: 2537 case IPPROTO_ESP: 2538 break; 2539 2540 case IPPROTO_COMP: 2541 /* IPCOMP spi is 16-bits. */ 2542 if (max >= 0x10000) { 2543 NL_SET_ERR_MSG(extack, "IPCOMP SPI must be <= 65535"); 2544 return -EINVAL; 2545 } 2546 break; 2547 2548 default: 2549 NL_SET_ERR_MSG(extack, "Invalid protocol, must be one of AH, ESP, IPCOMP"); 2550 return -EINVAL; 2551 } 2552 2553 if (min > max) { 2554 NL_SET_ERR_MSG(extack, "Invalid SPI range: min > max"); 2555 return -EINVAL; 2556 } 2557 2558 return 0; 2559 } 2560 EXPORT_SYMBOL(verify_spi_info); 2561 2562 int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high, 2563 struct netlink_ext_ack *extack) 2564 { 2565 struct net *net = xs_net(x); 2566 unsigned int h; 2567 struct xfrm_state *x0; 2568 int err = -ENOENT; 2569 u32 range = high - low + 1; 2570 __be32 newspi = 0; 2571 2572 spin_lock_bh(&x->lock); 2573 if (x->km.state == XFRM_STATE_DEAD) { 2574 NL_SET_ERR_MSG(extack, "Target ACQUIRE is in DEAD state"); 2575 goto unlock; 2576 } 2577 2578 err = 0; 2579 if (x->id.spi) 2580 goto unlock; 2581 2582 err = -ENOENT; 2583 2584 for (h = 0; h < range; h++) { 2585 u32 spi = (low == high) ? 
low : get_random_u32_inclusive(low, high); 2586 newspi = htonl(spi); 2587 2588 spin_lock_bh(&net->xfrm.xfrm_state_lock); 2589 x0 = xfrm_state_lookup_spi_proto(net, newspi, x->id.proto); 2590 if (!x0) { 2591 x->id.spi = newspi; 2592 h = xfrm_spi_hash(net, &x->id.daddr, newspi, x->id.proto, x->props.family); 2593 XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h, x->xso.type); 2594 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 2595 err = 0; 2596 goto unlock; 2597 } 2598 xfrm_state_put(x0); 2599 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 2600 2601 if (signal_pending(current)) { 2602 err = -ERESTARTSYS; 2603 goto unlock; 2604 } 2605 2606 if (low == high) 2607 break; 2608 } 2609 2610 if (err) 2611 NL_SET_ERR_MSG(extack, "No SPI available in the requested range"); 2612 2613 unlock: 2614 spin_unlock_bh(&x->lock); 2615 2616 return err; 2617 } 2618 EXPORT_SYMBOL(xfrm_alloc_spi); 2619 2620 static bool __xfrm_state_filter_match(struct xfrm_state *x, 2621 struct xfrm_address_filter *filter) 2622 { 2623 if (filter) { 2624 if ((filter->family == AF_INET || 2625 filter->family == AF_INET6) && 2626 x->props.family != filter->family) 2627 return false; 2628 2629 return addr_match(&x->props.saddr, &filter->saddr, 2630 filter->splen) && 2631 addr_match(&x->id.daddr, &filter->daddr, 2632 filter->dplen); 2633 } 2634 return true; 2635 } 2636 2637 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk, 2638 int (*func)(struct xfrm_state *, int, void*), 2639 void *data) 2640 { 2641 struct xfrm_state *state; 2642 struct xfrm_state_walk *x; 2643 int err = 0; 2644 2645 if (walk->seq != 0 && list_empty(&walk->all)) 2646 return 0; 2647 2648 spin_lock_bh(&net->xfrm.xfrm_state_lock); 2649 if (list_empty(&walk->all)) 2650 x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all); 2651 else 2652 x = list_first_entry(&walk->all, struct xfrm_state_walk, all); 2653 list_for_each_entry_from(x, &net->xfrm.state_all, all) { 2654 if (x->state == XFRM_STATE_DEAD) 2655 continue; 2656 state = container_of(x, struct xfrm_state, km); 2657 if (!xfrm_id_proto_match(state->id.proto, walk->proto)) 2658 continue; 2659 if (!__xfrm_state_filter_match(state, walk->filter)) 2660 continue; 2661 err = func(state, walk->seq, data); 2662 if (err) { 2663 list_move_tail(&walk->all, &x->all); 2664 goto out; 2665 } 2666 walk->seq++; 2667 } 2668 if (walk->seq == 0) { 2669 err = -ENOENT; 2670 goto out; 2671 } 2672 list_del_init(&walk->all); 2673 out: 2674 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 2675 return err; 2676 } 2677 EXPORT_SYMBOL(xfrm_state_walk); 2678 2679 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto, 2680 struct xfrm_address_filter *filter) 2681 { 2682 INIT_LIST_HEAD(&walk->all); 2683 walk->proto = proto; 2684 walk->state = XFRM_STATE_DEAD; 2685 walk->seq = 0; 2686 walk->filter = filter; 2687 } 2688 EXPORT_SYMBOL(xfrm_state_walk_init); 2689 2690 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net) 2691 { 2692 kfree(walk->filter); 2693 2694 if (list_empty(&walk->all)) 2695 return; 2696 2697 spin_lock_bh(&net->xfrm.xfrm_state_lock); 2698 list_del(&walk->all); 2699 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 2700 } 2701 EXPORT_SYMBOL(xfrm_state_walk_done); 2702 2703 static void xfrm_replay_timer_handler(struct timer_list *t) 2704 { 2705 struct xfrm_state *x = timer_container_of(x, t, rtimer); 2706 2707 spin_lock(&x->lock); 2708 2709 if (x->km.state == XFRM_STATE_VALID) { 2710 if (xfrm_aevent_is_on(xs_net(x))) 2711 xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT); 2712 else 
2713 x->xflags |= XFRM_TIME_DEFER; 2714 } 2715 2716 spin_unlock(&x->lock); 2717 } 2718 2719 static LIST_HEAD(xfrm_km_list); 2720 2721 void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c) 2722 { 2723 struct xfrm_mgr *km; 2724 2725 rcu_read_lock(); 2726 list_for_each_entry_rcu(km, &xfrm_km_list, list) 2727 if (km->notify_policy) 2728 km->notify_policy(xp, dir, c); 2729 rcu_read_unlock(); 2730 } 2731 2732 void km_state_notify(struct xfrm_state *x, const struct km_event *c) 2733 { 2734 struct xfrm_mgr *km; 2735 rcu_read_lock(); 2736 list_for_each_entry_rcu(km, &xfrm_km_list, list) 2737 if (km->notify) 2738 km->notify(x, c); 2739 rcu_read_unlock(); 2740 } 2741 2742 EXPORT_SYMBOL(km_policy_notify); 2743 EXPORT_SYMBOL(km_state_notify); 2744 2745 void km_state_expired(struct xfrm_state *x, int hard, u32 portid) 2746 { 2747 struct km_event c; 2748 2749 c.data.hard = hard; 2750 c.portid = portid; 2751 c.event = XFRM_MSG_EXPIRE; 2752 km_state_notify(x, &c); 2753 } 2754 2755 EXPORT_SYMBOL(km_state_expired); 2756 /* 2757 * We send to all registered managers regardless of failure 2758 * We are happy with one success 2759 */ 2760 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol) 2761 { 2762 int err = -EINVAL, acqret; 2763 struct xfrm_mgr *km; 2764 2765 rcu_read_lock(); 2766 list_for_each_entry_rcu(km, &xfrm_km_list, list) { 2767 acqret = km->acquire(x, t, pol); 2768 if (!acqret) 2769 err = acqret; 2770 } 2771 rcu_read_unlock(); 2772 return err; 2773 } 2774 EXPORT_SYMBOL(km_query); 2775 2776 static int __km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport) 2777 { 2778 int err = -EINVAL; 2779 struct xfrm_mgr *km; 2780 2781 rcu_read_lock(); 2782 list_for_each_entry_rcu(km, &xfrm_km_list, list) { 2783 if (km->new_mapping) 2784 err = km->new_mapping(x, ipaddr, sport); 2785 if (!err) 2786 break; 2787 } 2788 rcu_read_unlock(); 2789 return err; 2790 } 2791 2792 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport) 2793 { 2794 int ret = 0; 2795 2796 if (x->mapping_maxage) { 2797 if ((jiffies / HZ - x->new_mapping) > x->mapping_maxage || 2798 x->new_mapping_sport != sport) { 2799 x->new_mapping_sport = sport; 2800 x->new_mapping = jiffies / HZ; 2801 ret = __km_new_mapping(x, ipaddr, sport); 2802 } 2803 } else { 2804 ret = __km_new_mapping(x, ipaddr, sport); 2805 } 2806 2807 return ret; 2808 } 2809 EXPORT_SYMBOL(km_new_mapping); 2810 2811 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid) 2812 { 2813 struct km_event c; 2814 2815 c.data.hard = hard; 2816 c.portid = portid; 2817 c.event = XFRM_MSG_POLEXPIRE; 2818 km_policy_notify(pol, dir, &c); 2819 } 2820 EXPORT_SYMBOL(km_policy_expired); 2821 2822 #ifdef CONFIG_XFRM_MIGRATE 2823 int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, 2824 const struct xfrm_migrate *m, int num_migrate, 2825 const struct xfrm_kmaddress *k, 2826 const struct xfrm_encap_tmpl *encap) 2827 { 2828 int err = -EINVAL; 2829 int ret; 2830 struct xfrm_mgr *km; 2831 2832 rcu_read_lock(); 2833 list_for_each_entry_rcu(km, &xfrm_km_list, list) { 2834 if (km->migrate) { 2835 ret = km->migrate(sel, dir, type, m, num_migrate, k, 2836 encap); 2837 if (!ret) 2838 err = ret; 2839 } 2840 } 2841 rcu_read_unlock(); 2842 return err; 2843 } 2844 EXPORT_SYMBOL(km_migrate); 2845 #endif 2846 2847 int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr) 2848 { 2849 int err = -EINVAL; 2850 int ret; 2851 struct xfrm_mgr *km; 2852 2853 
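	/* Same first-success aggregation as km_query() and km_migrate()
	 * above: err starts out as -EINVAL and is overwritten with 0 by the
	 * first manager whose ->report() callback succeeds, so a single
	 * interested listener is enough for the report to count as
	 * delivered.
	 */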
rcu_read_lock(); 2854 list_for_each_entry_rcu(km, &xfrm_km_list, list) { 2855 if (km->report) { 2856 ret = km->report(net, proto, sel, addr); 2857 if (!ret) 2858 err = ret; 2859 } 2860 } 2861 rcu_read_unlock(); 2862 return err; 2863 } 2864 EXPORT_SYMBOL(km_report); 2865 2866 static bool km_is_alive(const struct km_event *c) 2867 { 2868 struct xfrm_mgr *km; 2869 bool is_alive = false; 2870 2871 rcu_read_lock(); 2872 list_for_each_entry_rcu(km, &xfrm_km_list, list) { 2873 if (km->is_alive && km->is_alive(c)) { 2874 is_alive = true; 2875 break; 2876 } 2877 } 2878 rcu_read_unlock(); 2879 2880 return is_alive; 2881 } 2882 2883 #if IS_ENABLED(CONFIG_XFRM_USER_COMPAT) 2884 static DEFINE_SPINLOCK(xfrm_translator_lock); 2885 static struct xfrm_translator __rcu *xfrm_translator; 2886 2887 struct xfrm_translator *xfrm_get_translator(void) 2888 { 2889 struct xfrm_translator *xtr; 2890 2891 rcu_read_lock(); 2892 xtr = rcu_dereference(xfrm_translator); 2893 if (unlikely(!xtr)) 2894 goto out; 2895 if (!try_module_get(xtr->owner)) 2896 xtr = NULL; 2897 out: 2898 rcu_read_unlock(); 2899 return xtr; 2900 } 2901 EXPORT_SYMBOL_GPL(xfrm_get_translator); 2902 2903 void xfrm_put_translator(struct xfrm_translator *xtr) 2904 { 2905 module_put(xtr->owner); 2906 } 2907 EXPORT_SYMBOL_GPL(xfrm_put_translator); 2908 2909 int xfrm_register_translator(struct xfrm_translator *xtr) 2910 { 2911 int err = 0; 2912 2913 spin_lock_bh(&xfrm_translator_lock); 2914 if (unlikely(xfrm_translator != NULL)) 2915 err = -EEXIST; 2916 else 2917 rcu_assign_pointer(xfrm_translator, xtr); 2918 spin_unlock_bh(&xfrm_translator_lock); 2919 2920 return err; 2921 } 2922 EXPORT_SYMBOL_GPL(xfrm_register_translator); 2923 2924 int xfrm_unregister_translator(struct xfrm_translator *xtr) 2925 { 2926 int err = 0; 2927 2928 spin_lock_bh(&xfrm_translator_lock); 2929 if (likely(xfrm_translator != NULL)) { 2930 if (rcu_access_pointer(xfrm_translator) != xtr) 2931 err = -EINVAL; 2932 else 2933 RCU_INIT_POINTER(xfrm_translator, NULL); 2934 } 2935 spin_unlock_bh(&xfrm_translator_lock); 2936 synchronize_rcu(); 2937 2938 return err; 2939 } 2940 EXPORT_SYMBOL_GPL(xfrm_unregister_translator); 2941 #endif 2942 2943 int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval, int optlen) 2944 { 2945 int err; 2946 u8 *data; 2947 struct xfrm_mgr *km; 2948 struct xfrm_policy *pol = NULL; 2949 2950 if (sockptr_is_null(optval) && !optlen) { 2951 xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL); 2952 xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL); 2953 __sk_dst_reset(sk); 2954 return 0; 2955 } 2956 2957 if (optlen <= 0 || optlen > PAGE_SIZE) 2958 return -EMSGSIZE; 2959 2960 data = memdup_sockptr(optval, optlen); 2961 if (IS_ERR(data)) 2962 return PTR_ERR(data); 2963 2964 if (in_compat_syscall()) { 2965 struct xfrm_translator *xtr = xfrm_get_translator(); 2966 2967 if (!xtr) { 2968 kfree(data); 2969 return -EOPNOTSUPP; 2970 } 2971 2972 err = xtr->xlate_user_policy_sockptr(&data, optlen); 2973 xfrm_put_translator(xtr); 2974 if (err) { 2975 kfree(data); 2976 return err; 2977 } 2978 } 2979 2980 err = -EINVAL; 2981 rcu_read_lock(); 2982 list_for_each_entry_rcu(km, &xfrm_km_list, list) { 2983 pol = km->compile_policy(sk, optname, data, 2984 optlen, &err); 2985 if (err >= 0) 2986 break; 2987 } 2988 rcu_read_unlock(); 2989 2990 if (err >= 0) { 2991 xfrm_sk_policy_insert(sk, err, pol); 2992 xfrm_pol_put(pol); 2993 __sk_dst_reset(sk); 2994 err = 0; 2995 } 2996 2997 kfree(data); 2998 return err; 2999 } 3000 EXPORT_SYMBOL(xfrm_user_policy); 3001 3002 static 
DEFINE_SPINLOCK(xfrm_km_lock); 3003 3004 void xfrm_register_km(struct xfrm_mgr *km) 3005 { 3006 spin_lock_bh(&xfrm_km_lock); 3007 list_add_tail_rcu(&km->list, &xfrm_km_list); 3008 spin_unlock_bh(&xfrm_km_lock); 3009 } 3010 EXPORT_SYMBOL(xfrm_register_km); 3011 3012 void xfrm_unregister_km(struct xfrm_mgr *km) 3013 { 3014 spin_lock_bh(&xfrm_km_lock); 3015 list_del_rcu(&km->list); 3016 spin_unlock_bh(&xfrm_km_lock); 3017 synchronize_rcu(); 3018 } 3019 EXPORT_SYMBOL(xfrm_unregister_km); 3020 3021 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo) 3022 { 3023 int err = 0; 3024 3025 if (WARN_ON(afinfo->family >= NPROTO)) 3026 return -EAFNOSUPPORT; 3027 3028 spin_lock_bh(&xfrm_state_afinfo_lock); 3029 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL)) 3030 err = -EEXIST; 3031 else 3032 rcu_assign_pointer(xfrm_state_afinfo[afinfo->family], afinfo); 3033 spin_unlock_bh(&xfrm_state_afinfo_lock); 3034 return err; 3035 } 3036 EXPORT_SYMBOL(xfrm_state_register_afinfo); 3037 3038 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo) 3039 { 3040 int err = 0, family = afinfo->family; 3041 3042 if (WARN_ON(family >= NPROTO)) 3043 return -EAFNOSUPPORT; 3044 3045 spin_lock_bh(&xfrm_state_afinfo_lock); 3046 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) { 3047 if (rcu_access_pointer(xfrm_state_afinfo[family]) != afinfo) 3048 err = -EINVAL; 3049 else 3050 RCU_INIT_POINTER(xfrm_state_afinfo[afinfo->family], NULL); 3051 } 3052 spin_unlock_bh(&xfrm_state_afinfo_lock); 3053 synchronize_rcu(); 3054 return err; 3055 } 3056 EXPORT_SYMBOL(xfrm_state_unregister_afinfo); 3057 3058 struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family) 3059 { 3060 if (unlikely(family >= NPROTO)) 3061 return NULL; 3062 3063 return rcu_dereference(xfrm_state_afinfo[family]); 3064 } 3065 EXPORT_SYMBOL_GPL(xfrm_state_afinfo_get_rcu); 3066 3067 struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family) 3068 { 3069 struct xfrm_state_afinfo *afinfo; 3070 if (unlikely(family >= NPROTO)) 3071 return NULL; 3072 rcu_read_lock(); 3073 afinfo = rcu_dereference(xfrm_state_afinfo[family]); 3074 if (unlikely(!afinfo)) 3075 rcu_read_unlock(); 3076 return afinfo; 3077 } 3078 3079 void xfrm_flush_gc(void) 3080 { 3081 flush_work(&xfrm_state_gc_work); 3082 } 3083 EXPORT_SYMBOL(xfrm_flush_gc); 3084 3085 static void xfrm_state_delete_tunnel(struct xfrm_state *x) 3086 { 3087 if (x->tunnel) { 3088 struct xfrm_state *t = x->tunnel; 3089 3090 if (atomic_dec_return(&t->tunnel_users) == 1) 3091 xfrm_state_delete(t); 3092 xfrm_state_put(t); 3093 x->tunnel = NULL; 3094 } 3095 } 3096 3097 u32 xfrm_state_mtu(struct xfrm_state *x, int mtu) 3098 { 3099 const struct xfrm_type *type = READ_ONCE(x->type); 3100 struct crypto_aead *aead; 3101 u32 blksize, net_adj = 0; 3102 3103 if (x->km.state != XFRM_STATE_VALID || 3104 !type || type->proto != IPPROTO_ESP) 3105 return mtu - x->props.header_len; 3106 3107 aead = x->data; 3108 blksize = ALIGN(crypto_aead_blocksize(aead), 4); 3109 3110 switch (x->props.mode) { 3111 case XFRM_MODE_TRANSPORT: 3112 case XFRM_MODE_BEET: 3113 if (x->props.family == AF_INET) 3114 net_adj = sizeof(struct iphdr); 3115 else if (x->props.family == AF_INET6) 3116 net_adj = sizeof(struct ipv6hdr); 3117 break; 3118 case XFRM_MODE_TUNNEL: 3119 break; 3120 default: 3121 if (x->mode_cbs && x->mode_cbs->get_inner_mtu) 3122 return x->mode_cbs->get_inner_mtu(x, mtu); 3123 3124 WARN_ON_ONCE(1); 3125 break; 3126 } 3127 3128 return ((mtu - x->props.header_len - crypto_aead_authsize(aead) - 3129 
net_adj) & ~(blksize - 1)) + net_adj - 2; 3130 } 3131 EXPORT_SYMBOL_GPL(xfrm_state_mtu); 3132 3133 int __xfrm_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack) 3134 { 3135 const struct xfrm_mode *inner_mode; 3136 const struct xfrm_mode *outer_mode; 3137 int family = x->props.family; 3138 int err; 3139 3140 if (family == AF_INET && 3141 READ_ONCE(xs_net(x)->ipv4.sysctl_ip_no_pmtu_disc)) 3142 x->props.flags |= XFRM_STATE_NOPMTUDISC; 3143 3144 err = -EPROTONOSUPPORT; 3145 3146 if (x->sel.family != AF_UNSPEC) { 3147 inner_mode = xfrm_get_mode(x->props.mode, x->sel.family); 3148 if (inner_mode == NULL) { 3149 NL_SET_ERR_MSG(extack, "Requested mode not found"); 3150 goto error; 3151 } 3152 3153 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) && 3154 family != x->sel.family) { 3155 NL_SET_ERR_MSG(extack, "Only tunnel modes can accommodate a change of family"); 3156 goto error; 3157 } 3158 3159 x->inner_mode = *inner_mode; 3160 } else { 3161 const struct xfrm_mode *inner_mode_iaf; 3162 int iafamily = AF_INET; 3163 3164 inner_mode = xfrm_get_mode(x->props.mode, x->props.family); 3165 if (inner_mode == NULL) { 3166 NL_SET_ERR_MSG(extack, "Requested mode not found"); 3167 goto error; 3168 } 3169 3170 x->inner_mode = *inner_mode; 3171 3172 if (x->props.family == AF_INET) 3173 iafamily = AF_INET6; 3174 3175 inner_mode_iaf = xfrm_get_mode(x->props.mode, iafamily); 3176 if (inner_mode_iaf) { 3177 if (inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL) 3178 x->inner_mode_iaf = *inner_mode_iaf; 3179 } 3180 } 3181 3182 x->type = xfrm_get_type(x->id.proto, family); 3183 if (x->type == NULL) { 3184 NL_SET_ERR_MSG(extack, "Requested type not found"); 3185 goto error; 3186 } 3187 3188 err = x->type->init_state(x, extack); 3189 if (err) 3190 goto error; 3191 3192 outer_mode = xfrm_get_mode(x->props.mode, family); 3193 if (!outer_mode) { 3194 NL_SET_ERR_MSG(extack, "Requested mode not found"); 3195 err = -EPROTONOSUPPORT; 3196 goto error; 3197 } 3198 3199 x->outer_mode = *outer_mode; 3200 if (x->nat_keepalive_interval) { 3201 if (x->dir != XFRM_SA_DIR_OUT) { 3202 NL_SET_ERR_MSG(extack, "NAT keepalive is only supported for outbound SAs"); 3203 err = -EINVAL; 3204 goto error; 3205 } 3206 3207 if (!x->encap || x->encap->encap_type != UDP_ENCAP_ESPINUDP) { 3208 NL_SET_ERR_MSG(extack, 3209 "NAT keepalive is only supported for UDP encapsulation"); 3210 err = -EINVAL; 3211 goto error; 3212 } 3213 } 3214 3215 x->mode_cbs = xfrm_get_mode_cbs(x->props.mode); 3216 if (x->mode_cbs) { 3217 if (x->mode_cbs->init_state) 3218 err = x->mode_cbs->init_state(x); 3219 module_put(x->mode_cbs->owner); 3220 } 3221 error: 3222 return err; 3223 } 3224 3225 EXPORT_SYMBOL(__xfrm_init_state); 3226 3227 int xfrm_init_state(struct xfrm_state *x) 3228 { 3229 int err; 3230 3231 err = __xfrm_init_state(x, NULL); 3232 if (err) 3233 return err; 3234 3235 err = xfrm_init_replay(x, NULL); 3236 if (err) 3237 return err; 3238 3239 x->km.state = XFRM_STATE_VALID; 3240 return 0; 3241 } 3242 3243 EXPORT_SYMBOL(xfrm_init_state); 3244 3245 int __net_init xfrm_state_init(struct net *net) 3246 { 3247 unsigned int sz; 3248 3249 if (net_eq(net, &init_net)) 3250 xfrm_state_cache = KMEM_CACHE(xfrm_state, 3251 SLAB_HWCACHE_ALIGN | SLAB_PANIC); 3252 3253 INIT_LIST_HEAD(&net->xfrm.state_all); 3254 3255 sz = sizeof(struct hlist_head) * 8; 3256 3257 net->xfrm.state_bydst = xfrm_hash_alloc(sz); 3258 if (!net->xfrm.state_bydst) 3259 goto out_bydst; 3260 net->xfrm.state_bysrc = xfrm_hash_alloc(sz); 3261 if (!net->xfrm.state_bysrc) 3262 goto out_bysrc; 3263 
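	/* Each hash allocation from here on unwinds through the label
	 * ladder at the end of this function on failure, freeing whatever
	 * was allocated before it; e.g. a state_byspi failure jumps to
	 * out_byspi, which frees state_bysrc and then state_bydst.
	 */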
net->xfrm.state_byspi = xfrm_hash_alloc(sz); 3264 if (!net->xfrm.state_byspi) 3265 goto out_byspi; 3266 net->xfrm.state_byseq = xfrm_hash_alloc(sz); 3267 if (!net->xfrm.state_byseq) 3268 goto out_byseq; 3269 3270 net->xfrm.state_cache_input = alloc_percpu(struct hlist_head); 3271 if (!net->xfrm.state_cache_input) 3272 goto out_state_cache_input; 3273 3274 net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1); 3275 3276 net->xfrm.state_num = 0; 3277 INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize); 3278 spin_lock_init(&net->xfrm.xfrm_state_lock); 3279 seqcount_spinlock_init(&net->xfrm.xfrm_state_hash_generation, 3280 &net->xfrm.xfrm_state_lock); 3281 return 0; 3282 3283 out_state_cache_input: 3284 xfrm_hash_free(net->xfrm.state_byseq, sz); 3285 out_byseq: 3286 xfrm_hash_free(net->xfrm.state_byspi, sz); 3287 out_byspi: 3288 xfrm_hash_free(net->xfrm.state_bysrc, sz); 3289 out_bysrc: 3290 xfrm_hash_free(net->xfrm.state_bydst, sz); 3291 out_bydst: 3292 return -ENOMEM; 3293 } 3294 3295 void xfrm_state_fini(struct net *net) 3296 { 3297 unsigned int sz; 3298 3299 flush_work(&net->xfrm.state_hash_work); 3300 xfrm_state_flush(net, IPSEC_PROTO_ANY, false); 3301 flush_work(&xfrm_state_gc_work); 3302 3303 WARN_ON(!list_empty(&net->xfrm.state_all)); 3304 3305 sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head); 3306 WARN_ON(!hlist_empty(net->xfrm.state_byseq)); 3307 xfrm_hash_free(net->xfrm.state_byseq, sz); 3308 WARN_ON(!hlist_empty(net->xfrm.state_byspi)); 3309 xfrm_hash_free(net->xfrm.state_byspi, sz); 3310 WARN_ON(!hlist_empty(net->xfrm.state_bysrc)); 3311 xfrm_hash_free(net->xfrm.state_bysrc, sz); 3312 WARN_ON(!hlist_empty(net->xfrm.state_bydst)); 3313 xfrm_hash_free(net->xfrm.state_bydst, sz); 3314 free_percpu(net->xfrm.state_cache_input); 3315 } 3316 3317 #ifdef CONFIG_AUDITSYSCALL 3318 static void xfrm_audit_helper_sainfo(struct xfrm_state *x, 3319 struct audit_buffer *audit_buf) 3320 { 3321 struct xfrm_sec_ctx *ctx = x->security; 3322 u32 spi = ntohl(x->id.spi); 3323 3324 if (ctx) 3325 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s", 3326 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str); 3327 3328 switch (x->props.family) { 3329 case AF_INET: 3330 audit_log_format(audit_buf, " src=%pI4 dst=%pI4", 3331 &x->props.saddr.a4, &x->id.daddr.a4); 3332 break; 3333 case AF_INET6: 3334 audit_log_format(audit_buf, " src=%pI6 dst=%pI6", 3335 x->props.saddr.a6, x->id.daddr.a6); 3336 break; 3337 } 3338 3339 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi); 3340 } 3341 3342 static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family, 3343 struct audit_buffer *audit_buf) 3344 { 3345 const struct iphdr *iph4; 3346 const struct ipv6hdr *iph6; 3347 3348 switch (family) { 3349 case AF_INET: 3350 iph4 = ip_hdr(skb); 3351 audit_log_format(audit_buf, " src=%pI4 dst=%pI4", 3352 &iph4->saddr, &iph4->daddr); 3353 break; 3354 case AF_INET6: 3355 iph6 = ipv6_hdr(skb); 3356 audit_log_format(audit_buf, 3357 " src=%pI6 dst=%pI6 flowlbl=0x%x%02x%02x", 3358 &iph6->saddr, &iph6->daddr, 3359 iph6->flow_lbl[0] & 0x0f, 3360 iph6->flow_lbl[1], 3361 iph6->flow_lbl[2]); 3362 break; 3363 } 3364 } 3365 3366 void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid) 3367 { 3368 struct audit_buffer *audit_buf; 3369 3370 audit_buf = xfrm_audit_start("SAD-add"); 3371 if (audit_buf == NULL) 3372 return; 3373 xfrm_audit_helper_usrinfo(task_valid, audit_buf); 3374 xfrm_audit_helper_sainfo(x, audit_buf); 3375 audit_log_format(audit_buf, " res=%u", result); 3376 
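	/* audit_log_end() below hands the assembled record to the audit
	 * subsystem; every SA audit hook in this block follows the same
	 * xfrm_audit_start() / helper / audit_log_end() shape.
	 */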
audit_log_end(audit_buf); 3377 } 3378 EXPORT_SYMBOL_GPL(xfrm_audit_state_add); 3379 3380 void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid) 3381 { 3382 struct audit_buffer *audit_buf; 3383 3384 audit_buf = xfrm_audit_start("SAD-delete"); 3385 if (audit_buf == NULL) 3386 return; 3387 xfrm_audit_helper_usrinfo(task_valid, audit_buf); 3388 xfrm_audit_helper_sainfo(x, audit_buf); 3389 audit_log_format(audit_buf, " res=%u", result); 3390 audit_log_end(audit_buf); 3391 } 3392 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete); 3393 3394 void xfrm_audit_state_replay_overflow(struct xfrm_state *x, 3395 struct sk_buff *skb) 3396 { 3397 struct audit_buffer *audit_buf; 3398 u32 spi; 3399 3400 audit_buf = xfrm_audit_start("SA-replay-overflow"); 3401 if (audit_buf == NULL) 3402 return; 3403 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf); 3404 /* don't record the sequence number because it's inherent in this kind 3405 * of audit message */ 3406 spi = ntohl(x->id.spi); 3407 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi); 3408 audit_log_end(audit_buf); 3409 } 3410 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow); 3411 3412 void xfrm_audit_state_replay(struct xfrm_state *x, 3413 struct sk_buff *skb, __be32 net_seq) 3414 { 3415 struct audit_buffer *audit_buf; 3416 u32 spi; 3417 3418 audit_buf = xfrm_audit_start("SA-replayed-pkt"); 3419 if (audit_buf == NULL) 3420 return; 3421 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf); 3422 spi = ntohl(x->id.spi); 3423 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u", 3424 spi, spi, ntohl(net_seq)); 3425 audit_log_end(audit_buf); 3426 } 3427 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay); 3428 3429 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family) 3430 { 3431 struct audit_buffer *audit_buf; 3432 3433 audit_buf = xfrm_audit_start("SA-notfound"); 3434 if (audit_buf == NULL) 3435 return; 3436 xfrm_audit_helper_pktinfo(skb, family, audit_buf); 3437 audit_log_end(audit_buf); 3438 } 3439 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple); 3440 3441 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, 3442 __be32 net_spi, __be32 net_seq) 3443 { 3444 struct audit_buffer *audit_buf; 3445 u32 spi; 3446 3447 audit_buf = xfrm_audit_start("SA-notfound"); 3448 if (audit_buf == NULL) 3449 return; 3450 xfrm_audit_helper_pktinfo(skb, family, audit_buf); 3451 spi = ntohl(net_spi); 3452 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u", 3453 spi, spi, ntohl(net_seq)); 3454 audit_log_end(audit_buf); 3455 } 3456 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound); 3457 3458 void xfrm_audit_state_icvfail(struct xfrm_state *x, 3459 struct sk_buff *skb, u8 proto) 3460 { 3461 struct audit_buffer *audit_buf; 3462 __be32 net_spi; 3463 __be32 net_seq; 3464 3465 audit_buf = xfrm_audit_start("SA-icv-failure"); 3466 if (audit_buf == NULL) 3467 return; 3468 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf); 3469 if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) { 3470 u32 spi = ntohl(net_spi); 3471 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u", 3472 spi, spi, ntohl(net_seq)); 3473 } 3474 audit_log_end(audit_buf); 3475 } 3476 EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail); 3477 #endif /* CONFIG_AUDITSYSCALL */ 3478
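/* End-to-end sketch (illustrative only): a key manager installing an SA
 * typically moves through this file roughly as follows, assuming a fully
 * populated state:
 *
 *	x = xfrm_state_alloc(net);
 *	(fill x->id, x->props, x->sel and the algorithm descriptors)
 *	err = xfrm_init_state(x);
 *	(__xfrm_init_state() plus replay init; km.state becomes VALID)
 *	if (!err)
 *		err = xfrm_state_add(x);
 *	(links into state_all and the hash chains, resolving any matching
 *	 ACQUIRE entry found by SPI or sequence number)
 */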