1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * xfrm_state.c 4 * 5 * Changes: 6 * Mitsuru KANDA @USAGI 7 * Kazunori MIYAZAWA @USAGI 8 * Kunihiro Ishiguro <kunihiro@ipinfusion.com> 9 * IPv6 support 10 * YOSHIFUJI Hideaki @USAGI 11 * Split up af-specific functions 12 * Derek Atkins <derek@ihtfp.com> 13 * Add UDP Encapsulation 14 * 15 */ 16 17 #include <linux/compat.h> 18 #include <linux/workqueue.h> 19 #include <net/xfrm.h> 20 #include <linux/pfkeyv2.h> 21 #include <linux/ipsec.h> 22 #include <linux/module.h> 23 #include <linux/cache.h> 24 #include <linux/audit.h> 25 #include <linux/uaccess.h> 26 #include <linux/ktime.h> 27 #include <linux/slab.h> 28 #include <linux/interrupt.h> 29 #include <linux/kernel.h> 30 31 #include <crypto/aead.h> 32 33 #include "xfrm_hash.h" 34 35 #define xfrm_state_deref_prot(table, net) \ 36 rcu_dereference_protected((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock)) 37 #define xfrm_state_deref_check(table, net) \ 38 rcu_dereference_check((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock)) 39 40 static void xfrm_state_gc_task(struct work_struct *work); 41 42 /* Each xfrm_state may be linked to two tables: 43 44 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl) 45 2. Hash table by (daddr,family,reqid) to find what SAs exist for given 46 destination/tunnel endpoint. (output) 47 */ 48 49 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; 50 static struct kmem_cache *xfrm_state_cache __ro_after_init; 51 52 static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task); 53 static HLIST_HEAD(xfrm_state_gc_list); 54 static HLIST_HEAD(xfrm_state_dev_gc_list); 55 56 static inline bool xfrm_state_hold_rcu(struct xfrm_state __rcu *x) 57 { 58 return refcount_inc_not_zero(&x->refcnt); 59 } 60 61 static inline unsigned int xfrm_dst_hash(struct net *net, 62 const xfrm_address_t *daddr, 63 const xfrm_address_t *saddr, 64 u32 reqid, 65 unsigned short family) 66 { 67 lockdep_assert_held(&net->xfrm.xfrm_state_lock); 68 69 return __xfrm_dst_hash(daddr, saddr, reqid, family, net->xfrm.state_hmask); 70 } 71 72 static inline unsigned int xfrm_src_hash(struct net *net, 73 const xfrm_address_t *daddr, 74 const xfrm_address_t *saddr, 75 unsigned short family) 76 { 77 lockdep_assert_held(&net->xfrm.xfrm_state_lock); 78 79 return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask); 80 } 81 82 static inline unsigned int 83 xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr, 84 __be32 spi, u8 proto, unsigned short family) 85 { 86 lockdep_assert_held(&net->xfrm.xfrm_state_lock); 87 88 return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask); 89 } 90 91 static unsigned int xfrm_seq_hash(struct net *net, u32 seq) 92 { 93 lockdep_assert_held(&net->xfrm.xfrm_state_lock); 94 95 return __xfrm_seq_hash(seq, net->xfrm.state_hmask); 96 } 97 98 #define XFRM_STATE_INSERT(by, _n, _h, _type) \ 99 { \ 100 struct xfrm_state *_x = NULL; \ 101 \ 102 if (_type != XFRM_DEV_OFFLOAD_PACKET) { \ 103 hlist_for_each_entry_rcu(_x, _h, by) { \ 104 if (_x->xso.type == XFRM_DEV_OFFLOAD_PACKET) \ 105 continue; \ 106 break; \ 107 } \ 108 } \ 109 \ 110 if (!_x || _x->xso.type == XFRM_DEV_OFFLOAD_PACKET) \ 111 /* SAD is empty or consist from HW SAs only */ \ 112 hlist_add_head_rcu(_n, _h); \ 113 else \ 114 hlist_add_before_rcu(_n, &_x->by); \ 115 } 116 117 static void xfrm_hash_transfer(struct hlist_head *list, 118 struct hlist_head *ndsttable, 119 struct hlist_head *nsrctable, 120 struct hlist_head *nspitable, 121 struct hlist_head *nseqtable, 122 unsigned int nhashmask) 123 { 124 struct hlist_node *tmp; 125 struct xfrm_state *x; 126 127 hlist_for_each_entry_safe(x, tmp, list, bydst) { 128 unsigned int h; 129 130 h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr, 131 x->props.reqid, x->props.family, 132 nhashmask); 133 XFRM_STATE_INSERT(bydst, &x->bydst, ndsttable + h, x->xso.type); 134 135 h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr, 136 x->props.family, 137 nhashmask); 138 XFRM_STATE_INSERT(bysrc, &x->bysrc, nsrctable + h, x->xso.type); 139 140 if (x->id.spi) { 141 h = __xfrm_spi_hash(&x->id.daddr, x->id.spi, 142 x->id.proto, x->props.family, 143 nhashmask); 144 XFRM_STATE_INSERT(byspi, &x->byspi, nspitable + h, 145 x->xso.type); 146 } 147 148 if (x->km.seq) { 149 h = __xfrm_seq_hash(x->km.seq, nhashmask); 150 XFRM_STATE_INSERT(byseq, &x->byseq, nseqtable + h, 151 x->xso.type); 152 } 153 } 154 } 155 156 static unsigned long xfrm_hash_new_size(unsigned int state_hmask) 157 { 158 return ((state_hmask + 1) << 1) * sizeof(struct hlist_head); 159 } 160 161 static void xfrm_hash_resize(struct work_struct *work) 162 { 163 struct net *net = container_of(work, struct net, xfrm.state_hash_work); 164 struct hlist_head *ndst, *nsrc, *nspi, *nseq, *odst, *osrc, *ospi, *oseq; 165 unsigned long nsize, osize; 166 unsigned int nhashmask, ohashmask; 167 int i; 168 169 nsize = xfrm_hash_new_size(net->xfrm.state_hmask); 170 ndst = xfrm_hash_alloc(nsize); 171 if (!ndst) 172 return; 173 nsrc = xfrm_hash_alloc(nsize); 174 if (!nsrc) { 175 xfrm_hash_free(ndst, nsize); 176 return; 177 } 178 nspi = xfrm_hash_alloc(nsize); 179 if (!nspi) { 180 xfrm_hash_free(ndst, nsize); 181 xfrm_hash_free(nsrc, nsize); 182 return; 183 } 184 nseq = xfrm_hash_alloc(nsize); 185 if (!nseq) { 186 xfrm_hash_free(ndst, nsize); 187 xfrm_hash_free(nsrc, nsize); 188 xfrm_hash_free(nspi, nsize); 189 return; 190 } 191 192 spin_lock_bh(&net->xfrm.xfrm_state_lock); 193 write_seqcount_begin(&net->xfrm.xfrm_state_hash_generation); 194 195 nhashmask = (nsize / sizeof(struct hlist_head)) - 1U; 196 odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net); 197 for (i = net->xfrm.state_hmask; i >= 0; i--) 198 xfrm_hash_transfer(odst + i, ndst, nsrc, nspi, nseq, nhashmask); 199 200 osrc = xfrm_state_deref_prot(net->xfrm.state_bysrc, net); 201 ospi = xfrm_state_deref_prot(net->xfrm.state_byspi, net); 202 oseq = xfrm_state_deref_prot(net->xfrm.state_byseq, net); 203 ohashmask = net->xfrm.state_hmask; 204 205 rcu_assign_pointer(net->xfrm.state_bydst, ndst); 206 rcu_assign_pointer(net->xfrm.state_bysrc, nsrc); 207 rcu_assign_pointer(net->xfrm.state_byspi, nspi); 208 rcu_assign_pointer(net->xfrm.state_byseq, nseq); 209 net->xfrm.state_hmask = nhashmask; 210 211 write_seqcount_end(&net->xfrm.xfrm_state_hash_generation); 212 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 213 214 osize = (ohashmask + 1) * sizeof(struct hlist_head); 215 216 synchronize_rcu(); 217 218 xfrm_hash_free(odst, osize); 219 xfrm_hash_free(osrc, osize); 220 xfrm_hash_free(ospi, osize); 221 xfrm_hash_free(oseq, osize); 222 } 223 224 static DEFINE_SPINLOCK(xfrm_state_afinfo_lock); 225 static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO]; 226 227 static DEFINE_SPINLOCK(xfrm_state_gc_lock); 228 static DEFINE_SPINLOCK(xfrm_state_dev_gc_lock); 229 230 int __xfrm_state_delete(struct xfrm_state *x); 231 232 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol); 233 static bool km_is_alive(const struct km_event *c); 234 void km_state_expired(struct xfrm_state *x, int hard, u32 portid); 235 236 int xfrm_register_type(const struct xfrm_type *type, unsigned short family) 237 { 238 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); 239 int err = 0; 240 241 if (!afinfo) 242 return -EAFNOSUPPORT; 243 244 #define X(afi, T, name) do { \ 245 WARN_ON((afi)->type_ ## name); \ 246 (afi)->type_ ## name = (T); \ 247 } while (0) 248 249 switch (type->proto) { 250 case IPPROTO_COMP: 251 X(afinfo, type, comp); 252 break; 253 case IPPROTO_AH: 254 X(afinfo, type, ah); 255 break; 256 case IPPROTO_ESP: 257 X(afinfo, type, esp); 258 break; 259 case IPPROTO_IPIP: 260 X(afinfo, type, ipip); 261 break; 262 case IPPROTO_DSTOPTS: 263 X(afinfo, type, dstopts); 264 break; 265 case IPPROTO_ROUTING: 266 X(afinfo, type, routing); 267 break; 268 case IPPROTO_IPV6: 269 X(afinfo, type, ipip6); 270 break; 271 default: 272 WARN_ON(1); 273 err = -EPROTONOSUPPORT; 274 break; 275 } 276 #undef X 277 rcu_read_unlock(); 278 return err; 279 } 280 EXPORT_SYMBOL(xfrm_register_type); 281 282 void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family) 283 { 284 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); 285 286 if (unlikely(afinfo == NULL)) 287 return; 288 289 #define X(afi, T, name) do { \ 290 WARN_ON((afi)->type_ ## name != (T)); \ 291 (afi)->type_ ## name = NULL; \ 292 } while (0) 293 294 switch (type->proto) { 295 case IPPROTO_COMP: 296 X(afinfo, type, comp); 297 break; 298 case IPPROTO_AH: 299 X(afinfo, type, ah); 300 break; 301 case IPPROTO_ESP: 302 X(afinfo, type, esp); 303 break; 304 case IPPROTO_IPIP: 305 X(afinfo, type, ipip); 306 break; 307 case IPPROTO_DSTOPTS: 308 X(afinfo, type, dstopts); 309 break; 310 case IPPROTO_ROUTING: 311 X(afinfo, type, routing); 312 break; 313 case IPPROTO_IPV6: 314 X(afinfo, type, ipip6); 315 break; 316 default: 317 WARN_ON(1); 318 break; 319 } 320 #undef X 321 rcu_read_unlock(); 322 } 323 EXPORT_SYMBOL(xfrm_unregister_type); 324 325 static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family) 326 { 327 const struct xfrm_type *type = NULL; 328 struct xfrm_state_afinfo *afinfo; 329 int modload_attempted = 0; 330 331 retry: 332 afinfo = xfrm_state_get_afinfo(family); 333 if (unlikely(afinfo == NULL)) 334 return NULL; 335 336 switch (proto) { 337 case IPPROTO_COMP: 338 type = afinfo->type_comp; 339 break; 340 case IPPROTO_AH: 341 type = afinfo->type_ah; 342 break; 343 case IPPROTO_ESP: 344 type = afinfo->type_esp; 345 break; 346 case IPPROTO_IPIP: 347 type = afinfo->type_ipip; 348 break; 349 case IPPROTO_DSTOPTS: 350 type = afinfo->type_dstopts; 351 break; 352 case IPPROTO_ROUTING: 353 type = afinfo->type_routing; 354 break; 355 case IPPROTO_IPV6: 356 type = afinfo->type_ipip6; 357 break; 358 default: 359 break; 360 } 361 362 if (unlikely(type && !try_module_get(type->owner))) 363 type = NULL; 364 365 rcu_read_unlock(); 366 367 if (!type && !modload_attempted) { 368 request_module("xfrm-type-%d-%d", family, proto); 369 modload_attempted = 1; 370 goto retry; 371 } 372 373 return type; 374 } 375 376 static void xfrm_put_type(const struct xfrm_type *type) 377 { 378 module_put(type->owner); 379 } 380 381 int xfrm_register_type_offload(const struct xfrm_type_offload *type, 382 unsigned short family) 383 { 384 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); 385 int err = 0; 386 387 if (unlikely(afinfo == NULL)) 388 return -EAFNOSUPPORT; 389 390 switch (type->proto) { 391 case IPPROTO_ESP: 392 WARN_ON(afinfo->type_offload_esp); 393 afinfo->type_offload_esp = type; 394 break; 395 default: 396 WARN_ON(1); 397 err = -EPROTONOSUPPORT; 398 break; 399 } 400 401 rcu_read_unlock(); 402 return err; 403 } 404 EXPORT_SYMBOL(xfrm_register_type_offload); 405 406 void xfrm_unregister_type_offload(const struct xfrm_type_offload *type, 407 unsigned short family) 408 { 409 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family); 410 411 if (unlikely(afinfo == NULL)) 412 return; 413 414 switch (type->proto) { 415 case IPPROTO_ESP: 416 WARN_ON(afinfo->type_offload_esp != type); 417 afinfo->type_offload_esp = NULL; 418 break; 419 default: 420 WARN_ON(1); 421 break; 422 } 423 rcu_read_unlock(); 424 } 425 EXPORT_SYMBOL(xfrm_unregister_type_offload); 426 427 void xfrm_set_type_offload(struct xfrm_state *x, bool try_load) 428 { 429 const struct xfrm_type_offload *type = NULL; 430 struct xfrm_state_afinfo *afinfo; 431 432 retry: 433 afinfo = xfrm_state_get_afinfo(x->props.family); 434 if (unlikely(afinfo == NULL)) 435 goto out; 436 437 switch (x->id.proto) { 438 case IPPROTO_ESP: 439 type = afinfo->type_offload_esp; 440 break; 441 default: 442 break; 443 } 444 445 if ((type && !try_module_get(type->owner))) 446 type = NULL; 447 448 rcu_read_unlock(); 449 450 if (!type && try_load) { 451 request_module("xfrm-offload-%d-%d", x->props.family, 452 x->id.proto); 453 try_load = false; 454 goto retry; 455 } 456 457 out: 458 x->type_offload = type; 459 } 460 EXPORT_SYMBOL(xfrm_set_type_offload); 461 462 static const struct xfrm_mode xfrm4_mode_map[XFRM_MODE_MAX] = { 463 [XFRM_MODE_BEET] = { 464 .encap = XFRM_MODE_BEET, 465 .flags = XFRM_MODE_FLAG_TUNNEL, 466 .family = AF_INET, 467 }, 468 [XFRM_MODE_TRANSPORT] = { 469 .encap = XFRM_MODE_TRANSPORT, 470 .family = AF_INET, 471 }, 472 [XFRM_MODE_TUNNEL] = { 473 .encap = XFRM_MODE_TUNNEL, 474 .flags = XFRM_MODE_FLAG_TUNNEL, 475 .family = AF_INET, 476 }, 477 [XFRM_MODE_IPTFS] = { 478 .encap = XFRM_MODE_IPTFS, 479 .flags = XFRM_MODE_FLAG_TUNNEL, 480 .family = AF_INET, 481 }, 482 }; 483 484 static const struct xfrm_mode xfrm6_mode_map[XFRM_MODE_MAX] = { 485 [XFRM_MODE_BEET] = { 486 .encap = XFRM_MODE_BEET, 487 .flags = XFRM_MODE_FLAG_TUNNEL, 488 .family = AF_INET6, 489 }, 490 [XFRM_MODE_ROUTEOPTIMIZATION] = { 491 .encap = XFRM_MODE_ROUTEOPTIMIZATION, 492 .family = AF_INET6, 493 }, 494 [XFRM_MODE_TRANSPORT] = { 495 .encap = XFRM_MODE_TRANSPORT, 496 .family = AF_INET6, 497 }, 498 [XFRM_MODE_TUNNEL] = { 499 .encap = XFRM_MODE_TUNNEL, 500 .flags = XFRM_MODE_FLAG_TUNNEL, 501 .family = AF_INET6, 502 }, 503 [XFRM_MODE_IPTFS] = { 504 .encap = XFRM_MODE_IPTFS, 505 .flags = XFRM_MODE_FLAG_TUNNEL, 506 .family = AF_INET6, 507 }, 508 }; 509 510 static const struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family) 511 { 512 const struct xfrm_mode *mode; 513 514 if (unlikely(encap >= XFRM_MODE_MAX)) 515 return NULL; 516 517 switch (family) { 518 case AF_INET: 519 mode = &xfrm4_mode_map[encap]; 520 if (mode->family == family) 521 return mode; 522 break; 523 case AF_INET6: 524 mode = &xfrm6_mode_map[encap]; 525 if (mode->family == family) 526 return mode; 527 break; 528 default: 529 break; 530 } 531 532 return NULL; 533 } 534 535 static const struct xfrm_mode_cbs __rcu *xfrm_mode_cbs_map[XFRM_MODE_MAX]; 536 static DEFINE_SPINLOCK(xfrm_mode_cbs_map_lock); 537 538 int xfrm_register_mode_cbs(u8 mode, const struct xfrm_mode_cbs *mode_cbs) 539 { 540 if (mode >= XFRM_MODE_MAX) 541 return -EINVAL; 542 543 spin_lock_bh(&xfrm_mode_cbs_map_lock); 544 rcu_assign_pointer(xfrm_mode_cbs_map[mode], mode_cbs); 545 spin_unlock_bh(&xfrm_mode_cbs_map_lock); 546 547 return 0; 548 } 549 EXPORT_SYMBOL(xfrm_register_mode_cbs); 550 551 void xfrm_unregister_mode_cbs(u8 mode) 552 { 553 if (mode >= XFRM_MODE_MAX) 554 return; 555 556 spin_lock_bh(&xfrm_mode_cbs_map_lock); 557 RCU_INIT_POINTER(xfrm_mode_cbs_map[mode], NULL); 558 spin_unlock_bh(&xfrm_mode_cbs_map_lock); 559 synchronize_rcu(); 560 } 561 EXPORT_SYMBOL(xfrm_unregister_mode_cbs); 562 563 static const struct xfrm_mode_cbs *xfrm_get_mode_cbs(u8 mode) 564 { 565 const struct xfrm_mode_cbs *cbs; 566 bool try_load = true; 567 568 if (mode >= XFRM_MODE_MAX) 569 return NULL; 570 571 retry: 572 rcu_read_lock(); 573 574 cbs = rcu_dereference(xfrm_mode_cbs_map[mode]); 575 if (cbs && !try_module_get(cbs->owner)) 576 cbs = NULL; 577 578 rcu_read_unlock(); 579 580 if (mode == XFRM_MODE_IPTFS && !cbs && try_load) { 581 request_module("xfrm-iptfs"); 582 try_load = false; 583 goto retry; 584 } 585 586 return cbs; 587 } 588 589 void xfrm_state_free(struct xfrm_state *x) 590 { 591 kmem_cache_free(xfrm_state_cache, x); 592 } 593 EXPORT_SYMBOL(xfrm_state_free); 594 595 static void xfrm_state_gc_destroy(struct xfrm_state *x) 596 { 597 if (x->mode_cbs && x->mode_cbs->destroy_state) 598 x->mode_cbs->destroy_state(x); 599 hrtimer_cancel(&x->mtimer); 600 timer_delete_sync(&x->rtimer); 601 kfree_sensitive(x->aead); 602 kfree_sensitive(x->aalg); 603 kfree_sensitive(x->ealg); 604 kfree(x->calg); 605 kfree(x->encap); 606 kfree(x->coaddr); 607 kfree(x->replay_esn); 608 kfree(x->preplay_esn); 609 xfrm_unset_type_offload(x); 610 if (x->type) { 611 x->type->destructor(x); 612 xfrm_put_type(x->type); 613 } 614 if (x->xfrag.page) 615 put_page(x->xfrag.page); 616 xfrm_dev_state_free(x); 617 security_xfrm_state_free(x); 618 xfrm_state_free(x); 619 } 620 621 static void xfrm_state_gc_task(struct work_struct *work) 622 { 623 struct xfrm_state *x; 624 struct hlist_node *tmp; 625 struct hlist_head gc_list; 626 627 spin_lock_bh(&xfrm_state_gc_lock); 628 hlist_move_list(&xfrm_state_gc_list, &gc_list); 629 spin_unlock_bh(&xfrm_state_gc_lock); 630 631 synchronize_rcu(); 632 633 hlist_for_each_entry_safe(x, tmp, &gc_list, gclist) 634 xfrm_state_gc_destroy(x); 635 } 636 637 static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me) 638 { 639 struct xfrm_state *x = container_of(me, struct xfrm_state, mtimer); 640 enum hrtimer_restart ret = HRTIMER_NORESTART; 641 time64_t now = ktime_get_real_seconds(); 642 time64_t next = TIME64_MAX; 643 int warn = 0; 644 int err = 0; 645 646 spin_lock(&x->lock); 647 xfrm_dev_state_update_stats(x); 648 649 if (x->km.state == XFRM_STATE_DEAD) 650 goto out; 651 if (x->km.state == XFRM_STATE_EXPIRED) 652 goto expired; 653 if (x->lft.hard_add_expires_seconds) { 654 time64_t tmo = x->lft.hard_add_expires_seconds + 655 x->curlft.add_time - now; 656 if (tmo <= 0) { 657 if (x->xflags & XFRM_SOFT_EXPIRE) { 658 /* enter hard expire without soft expire first?! 659 * setting a new date could trigger this. 660 * workaround: fix x->curflt.add_time by below: 661 */ 662 x->curlft.add_time = now - x->saved_tmo - 1; 663 tmo = x->lft.hard_add_expires_seconds - x->saved_tmo; 664 } else 665 goto expired; 666 } 667 if (tmo < next) 668 next = tmo; 669 } 670 if (x->lft.hard_use_expires_seconds) { 671 time64_t tmo = x->lft.hard_use_expires_seconds + 672 (READ_ONCE(x->curlft.use_time) ? : now) - now; 673 if (tmo <= 0) 674 goto expired; 675 if (tmo < next) 676 next = tmo; 677 } 678 if (x->km.dying) 679 goto resched; 680 if (x->lft.soft_add_expires_seconds) { 681 time64_t tmo = x->lft.soft_add_expires_seconds + 682 x->curlft.add_time - now; 683 if (tmo <= 0) { 684 warn = 1; 685 x->xflags &= ~XFRM_SOFT_EXPIRE; 686 } else if (tmo < next) { 687 next = tmo; 688 x->xflags |= XFRM_SOFT_EXPIRE; 689 x->saved_tmo = tmo; 690 } 691 } 692 if (x->lft.soft_use_expires_seconds) { 693 time64_t tmo = x->lft.soft_use_expires_seconds + 694 (READ_ONCE(x->curlft.use_time) ? : now) - now; 695 if (tmo <= 0) 696 warn = 1; 697 else if (tmo < next) 698 next = tmo; 699 } 700 701 x->km.dying = warn; 702 if (warn) 703 km_state_expired(x, 0, 0); 704 resched: 705 if (next != TIME64_MAX) { 706 hrtimer_forward_now(&x->mtimer, ktime_set(next, 0)); 707 ret = HRTIMER_RESTART; 708 } 709 710 goto out; 711 712 expired: 713 if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0) 714 x->km.state = XFRM_STATE_EXPIRED; 715 716 err = __xfrm_state_delete(x); 717 if (!err) 718 km_state_expired(x, 1, 0); 719 720 xfrm_audit_state_delete(x, err ? 0 : 1, true); 721 722 out: 723 spin_unlock(&x->lock); 724 return ret; 725 } 726 727 static void xfrm_replay_timer_handler(struct timer_list *t); 728 729 struct xfrm_state *xfrm_state_alloc(struct net *net) 730 { 731 struct xfrm_state *x; 732 733 x = kmem_cache_zalloc(xfrm_state_cache, GFP_ATOMIC); 734 735 if (x) { 736 write_pnet(&x->xs_net, net); 737 refcount_set(&x->refcnt, 1); 738 atomic_set(&x->tunnel_users, 0); 739 INIT_LIST_HEAD(&x->km.all); 740 INIT_HLIST_NODE(&x->state_cache); 741 INIT_HLIST_NODE(&x->bydst); 742 INIT_HLIST_NODE(&x->bysrc); 743 INIT_HLIST_NODE(&x->byspi); 744 INIT_HLIST_NODE(&x->byseq); 745 hrtimer_setup(&x->mtimer, xfrm_timer_handler, CLOCK_BOOTTIME, 746 HRTIMER_MODE_ABS_SOFT); 747 timer_setup(&x->rtimer, xfrm_replay_timer_handler, 0); 748 x->curlft.add_time = ktime_get_real_seconds(); 749 x->lft.soft_byte_limit = XFRM_INF; 750 x->lft.soft_packet_limit = XFRM_INF; 751 x->lft.hard_byte_limit = XFRM_INF; 752 x->lft.hard_packet_limit = XFRM_INF; 753 x->replay_maxage = 0; 754 x->replay_maxdiff = 0; 755 x->pcpu_num = UINT_MAX; 756 spin_lock_init(&x->lock); 757 x->mode_data = NULL; 758 } 759 return x; 760 } 761 EXPORT_SYMBOL(xfrm_state_alloc); 762 763 #ifdef CONFIG_XFRM_OFFLOAD 764 void xfrm_dev_state_delete(struct xfrm_state *x) 765 { 766 struct xfrm_dev_offload *xso = &x->xso; 767 struct net_device *dev = READ_ONCE(xso->dev); 768 769 if (dev) { 770 dev->xfrmdev_ops->xdo_dev_state_delete(dev, x); 771 spin_lock_bh(&xfrm_state_dev_gc_lock); 772 hlist_add_head(&x->dev_gclist, &xfrm_state_dev_gc_list); 773 spin_unlock_bh(&xfrm_state_dev_gc_lock); 774 } 775 } 776 EXPORT_SYMBOL_GPL(xfrm_dev_state_delete); 777 778 void xfrm_dev_state_free(struct xfrm_state *x) 779 { 780 struct xfrm_dev_offload *xso = &x->xso; 781 struct net_device *dev = READ_ONCE(xso->dev); 782 783 if (dev && dev->xfrmdev_ops) { 784 spin_lock_bh(&xfrm_state_dev_gc_lock); 785 if (!hlist_unhashed(&x->dev_gclist)) 786 hlist_del(&x->dev_gclist); 787 spin_unlock_bh(&xfrm_state_dev_gc_lock); 788 789 if (dev->xfrmdev_ops->xdo_dev_state_free) 790 dev->xfrmdev_ops->xdo_dev_state_free(dev, x); 791 WRITE_ONCE(xso->dev, NULL); 792 xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED; 793 netdev_put(dev, &xso->dev_tracker); 794 } 795 } 796 #endif 797 798 void __xfrm_state_destroy(struct xfrm_state *x) 799 { 800 WARN_ON(x->km.state != XFRM_STATE_DEAD); 801 802 spin_lock_bh(&xfrm_state_gc_lock); 803 hlist_add_head(&x->gclist, &xfrm_state_gc_list); 804 spin_unlock_bh(&xfrm_state_gc_lock); 805 schedule_work(&xfrm_state_gc_work); 806 } 807 EXPORT_SYMBOL(__xfrm_state_destroy); 808 809 static void xfrm_state_delete_tunnel(struct xfrm_state *x); 810 int __xfrm_state_delete(struct xfrm_state *x) 811 { 812 struct net *net = xs_net(x); 813 int err = -ESRCH; 814 815 if (x->km.state != XFRM_STATE_DEAD) { 816 x->km.state = XFRM_STATE_DEAD; 817 818 spin_lock(&net->xfrm.xfrm_state_lock); 819 list_del(&x->km.all); 820 hlist_del_rcu(&x->bydst); 821 hlist_del_rcu(&x->bysrc); 822 if (x->km.seq) 823 hlist_del_rcu(&x->byseq); 824 if (!hlist_unhashed(&x->state_cache)) 825 hlist_del_rcu(&x->state_cache); 826 if (!hlist_unhashed(&x->state_cache_input)) 827 hlist_del_rcu(&x->state_cache_input); 828 829 if (x->id.spi) 830 hlist_del_rcu(&x->byspi); 831 net->xfrm.state_num--; 832 xfrm_nat_keepalive_state_updated(x); 833 spin_unlock(&net->xfrm.xfrm_state_lock); 834 835 xfrm_dev_state_delete(x); 836 837 xfrm_state_delete_tunnel(x); 838 839 /* All xfrm_state objects are created by xfrm_state_alloc. 840 * The xfrm_state_alloc call gives a reference, and that 841 * is what we are dropping here. 842 */ 843 xfrm_state_put(x); 844 err = 0; 845 } 846 847 return err; 848 } 849 EXPORT_SYMBOL(__xfrm_state_delete); 850 851 int xfrm_state_delete(struct xfrm_state *x) 852 { 853 int err; 854 855 spin_lock_bh(&x->lock); 856 err = __xfrm_state_delete(x); 857 spin_unlock_bh(&x->lock); 858 859 return err; 860 } 861 EXPORT_SYMBOL(xfrm_state_delete); 862 863 #ifdef CONFIG_SECURITY_NETWORK_XFRM 864 static inline int 865 xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid) 866 { 867 int i, err = 0; 868 869 for (i = 0; i <= net->xfrm.state_hmask; i++) { 870 struct xfrm_state *x; 871 872 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) { 873 if (xfrm_id_proto_match(x->id.proto, proto) && 874 (err = security_xfrm_state_delete(x)) != 0) { 875 xfrm_audit_state_delete(x, 0, task_valid); 876 return err; 877 } 878 } 879 } 880 881 return err; 882 } 883 884 static inline int 885 xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid) 886 { 887 int i, err = 0; 888 889 for (i = 0; i <= net->xfrm.state_hmask; i++) { 890 struct xfrm_state *x; 891 struct xfrm_dev_offload *xso; 892 893 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) { 894 xso = &x->xso; 895 896 if (xso->dev == dev && 897 (err = security_xfrm_state_delete(x)) != 0) { 898 xfrm_audit_state_delete(x, 0, task_valid); 899 return err; 900 } 901 } 902 } 903 904 return err; 905 } 906 #else 907 static inline int 908 xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid) 909 { 910 return 0; 911 } 912 913 static inline int 914 xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid) 915 { 916 return 0; 917 } 918 #endif 919 920 int xfrm_state_flush(struct net *net, u8 proto, bool task_valid) 921 { 922 int i, err = 0, cnt = 0; 923 924 spin_lock_bh(&net->xfrm.xfrm_state_lock); 925 err = xfrm_state_flush_secctx_check(net, proto, task_valid); 926 if (err) 927 goto out; 928 929 err = -ESRCH; 930 for (i = 0; i <= net->xfrm.state_hmask; i++) { 931 struct xfrm_state *x; 932 restart: 933 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) { 934 if (!xfrm_state_kern(x) && 935 xfrm_id_proto_match(x->id.proto, proto)) { 936 xfrm_state_hold(x); 937 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 938 939 err = xfrm_state_delete(x); 940 xfrm_audit_state_delete(x, err ? 0 : 1, 941 task_valid); 942 xfrm_state_put(x); 943 if (!err) 944 cnt++; 945 946 spin_lock_bh(&net->xfrm.xfrm_state_lock); 947 goto restart; 948 } 949 } 950 } 951 out: 952 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 953 if (cnt) 954 err = 0; 955 956 return err; 957 } 958 EXPORT_SYMBOL(xfrm_state_flush); 959 960 int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid) 961 { 962 struct xfrm_state *x; 963 struct hlist_node *tmp; 964 struct xfrm_dev_offload *xso; 965 int i, err = 0, cnt = 0; 966 967 spin_lock_bh(&net->xfrm.xfrm_state_lock); 968 err = xfrm_dev_state_flush_secctx_check(net, dev, task_valid); 969 if (err) 970 goto out; 971 972 err = -ESRCH; 973 for (i = 0; i <= net->xfrm.state_hmask; i++) { 974 restart: 975 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) { 976 xso = &x->xso; 977 978 if (!xfrm_state_kern(x) && xso->dev == dev) { 979 xfrm_state_hold(x); 980 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 981 982 err = xfrm_state_delete(x); 983 xfrm_dev_state_free(x); 984 985 xfrm_audit_state_delete(x, err ? 0 : 1, 986 task_valid); 987 xfrm_state_put(x); 988 if (!err) 989 cnt++; 990 991 spin_lock_bh(&net->xfrm.xfrm_state_lock); 992 goto restart; 993 } 994 } 995 } 996 if (cnt) 997 err = 0; 998 999 out: 1000 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 1001 1002 spin_lock_bh(&xfrm_state_dev_gc_lock); 1003 restart_gc: 1004 hlist_for_each_entry_safe(x, tmp, &xfrm_state_dev_gc_list, dev_gclist) { 1005 xso = &x->xso; 1006 1007 if (xso->dev == dev) { 1008 spin_unlock_bh(&xfrm_state_dev_gc_lock); 1009 xfrm_dev_state_free(x); 1010 spin_lock_bh(&xfrm_state_dev_gc_lock); 1011 goto restart_gc; 1012 } 1013 1014 } 1015 spin_unlock_bh(&xfrm_state_dev_gc_lock); 1016 1017 xfrm_flush_gc(); 1018 1019 return err; 1020 } 1021 EXPORT_SYMBOL(xfrm_dev_state_flush); 1022 1023 void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si) 1024 { 1025 spin_lock_bh(&net->xfrm.xfrm_state_lock); 1026 si->sadcnt = net->xfrm.state_num; 1027 si->sadhcnt = net->xfrm.state_hmask + 1; 1028 si->sadhmcnt = xfrm_state_hashmax; 1029 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 1030 } 1031 EXPORT_SYMBOL(xfrm_sad_getinfo); 1032 1033 static void 1034 __xfrm4_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl) 1035 { 1036 const struct flowi4 *fl4 = &fl->u.ip4; 1037 1038 sel->daddr.a4 = fl4->daddr; 1039 sel->saddr.a4 = fl4->saddr; 1040 sel->dport = xfrm_flowi_dport(fl, &fl4->uli); 1041 sel->dport_mask = htons(0xffff); 1042 sel->sport = xfrm_flowi_sport(fl, &fl4->uli); 1043 sel->sport_mask = htons(0xffff); 1044 sel->family = AF_INET; 1045 sel->prefixlen_d = 32; 1046 sel->prefixlen_s = 32; 1047 sel->proto = fl4->flowi4_proto; 1048 sel->ifindex = fl4->flowi4_oif; 1049 } 1050 1051 static void 1052 __xfrm6_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl) 1053 { 1054 const struct flowi6 *fl6 = &fl->u.ip6; 1055 1056 /* Initialize temporary selector matching only to current session. */ 1057 *(struct in6_addr *)&sel->daddr = fl6->daddr; 1058 *(struct in6_addr *)&sel->saddr = fl6->saddr; 1059 sel->dport = xfrm_flowi_dport(fl, &fl6->uli); 1060 sel->dport_mask = htons(0xffff); 1061 sel->sport = xfrm_flowi_sport(fl, &fl6->uli); 1062 sel->sport_mask = htons(0xffff); 1063 sel->family = AF_INET6; 1064 sel->prefixlen_d = 128; 1065 sel->prefixlen_s = 128; 1066 sel->proto = fl6->flowi6_proto; 1067 sel->ifindex = fl6->flowi6_oif; 1068 } 1069 1070 static void 1071 xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl, 1072 const struct xfrm_tmpl *tmpl, 1073 const xfrm_address_t *daddr, const xfrm_address_t *saddr, 1074 unsigned short family) 1075 { 1076 switch (family) { 1077 case AF_INET: 1078 __xfrm4_init_tempsel(&x->sel, fl); 1079 break; 1080 case AF_INET6: 1081 __xfrm6_init_tempsel(&x->sel, fl); 1082 break; 1083 } 1084 1085 x->id = tmpl->id; 1086 1087 switch (tmpl->encap_family) { 1088 case AF_INET: 1089 if (x->id.daddr.a4 == 0) 1090 x->id.daddr.a4 = daddr->a4; 1091 x->props.saddr = tmpl->saddr; 1092 if (x->props.saddr.a4 == 0) 1093 x->props.saddr.a4 = saddr->a4; 1094 break; 1095 case AF_INET6: 1096 if (ipv6_addr_any((struct in6_addr *)&x->id.daddr)) 1097 memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr)); 1098 memcpy(&x->props.saddr, &tmpl->saddr, sizeof(x->props.saddr)); 1099 if (ipv6_addr_any((struct in6_addr *)&x->props.saddr)) 1100 memcpy(&x->props.saddr, saddr, sizeof(x->props.saddr)); 1101 break; 1102 } 1103 1104 x->props.mode = tmpl->mode; 1105 x->props.reqid = tmpl->reqid; 1106 x->props.family = tmpl->encap_family; 1107 } 1108 1109 struct xfrm_hash_state_ptrs { 1110 const struct hlist_head *bydst; 1111 const struct hlist_head *bysrc; 1112 const struct hlist_head *byspi; 1113 unsigned int hmask; 1114 }; 1115 1116 static void xfrm_hash_ptrs_get(const struct net *net, struct xfrm_hash_state_ptrs *ptrs) 1117 { 1118 unsigned int sequence; 1119 1120 do { 1121 sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation); 1122 1123 ptrs->bydst = xfrm_state_deref_check(net->xfrm.state_bydst, net); 1124 ptrs->bysrc = xfrm_state_deref_check(net->xfrm.state_bysrc, net); 1125 ptrs->byspi = xfrm_state_deref_check(net->xfrm.state_byspi, net); 1126 ptrs->hmask = net->xfrm.state_hmask; 1127 } while (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence)); 1128 } 1129 1130 static struct xfrm_state *__xfrm_state_lookup_all(const struct xfrm_hash_state_ptrs *state_ptrs, 1131 u32 mark, 1132 const xfrm_address_t *daddr, 1133 __be32 spi, u8 proto, 1134 unsigned short family, 1135 struct xfrm_dev_offload *xdo) 1136 { 1137 unsigned int h = __xfrm_spi_hash(daddr, spi, proto, family, state_ptrs->hmask); 1138 struct xfrm_state *x; 1139 1140 hlist_for_each_entry_rcu(x, state_ptrs->byspi + h, byspi) { 1141 #ifdef CONFIG_XFRM_OFFLOAD 1142 if (xdo->type == XFRM_DEV_OFFLOAD_PACKET) { 1143 if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET) 1144 /* HW states are in the head of list, there is 1145 * no need to iterate further. 1146 */ 1147 break; 1148 1149 /* Packet offload: both policy and SA should 1150 * have same device. 1151 */ 1152 if (xdo->dev != x->xso.dev) 1153 continue; 1154 } else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) 1155 /* Skip HW policy for SW lookups */ 1156 continue; 1157 #endif 1158 if (x->props.family != family || 1159 x->id.spi != spi || 1160 x->id.proto != proto || 1161 !xfrm_addr_equal(&x->id.daddr, daddr, family)) 1162 continue; 1163 1164 if ((mark & x->mark.m) != x->mark.v) 1165 continue; 1166 if (!xfrm_state_hold_rcu(x)) 1167 continue; 1168 return x; 1169 } 1170 1171 return NULL; 1172 } 1173 1174 static struct xfrm_state *__xfrm_state_lookup(const struct xfrm_hash_state_ptrs *state_ptrs, 1175 u32 mark, 1176 const xfrm_address_t *daddr, 1177 __be32 spi, u8 proto, 1178 unsigned short family) 1179 { 1180 unsigned int h = __xfrm_spi_hash(daddr, spi, proto, family, state_ptrs->hmask); 1181 struct xfrm_state *x; 1182 1183 hlist_for_each_entry_rcu(x, state_ptrs->byspi + h, byspi) { 1184 if (x->props.family != family || 1185 x->id.spi != spi || 1186 x->id.proto != proto || 1187 !xfrm_addr_equal(&x->id.daddr, daddr, family)) 1188 continue; 1189 1190 if ((mark & x->mark.m) != x->mark.v) 1191 continue; 1192 if (!xfrm_state_hold_rcu(x)) 1193 continue; 1194 return x; 1195 } 1196 1197 return NULL; 1198 } 1199 1200 struct xfrm_state *xfrm_input_state_lookup(struct net *net, u32 mark, 1201 const xfrm_address_t *daddr, 1202 __be32 spi, u8 proto, 1203 unsigned short family) 1204 { 1205 struct xfrm_hash_state_ptrs state_ptrs; 1206 struct hlist_head *state_cache_input; 1207 struct xfrm_state *x = NULL; 1208 1209 state_cache_input = raw_cpu_ptr(net->xfrm.state_cache_input); 1210 1211 rcu_read_lock(); 1212 hlist_for_each_entry_rcu(x, state_cache_input, state_cache_input) { 1213 if (x->props.family != family || 1214 x->id.spi != spi || 1215 x->id.proto != proto || 1216 !xfrm_addr_equal(&x->id.daddr, daddr, family)) 1217 continue; 1218 1219 if ((mark & x->mark.m) != x->mark.v) 1220 continue; 1221 if (!xfrm_state_hold_rcu(x)) 1222 continue; 1223 goto out; 1224 } 1225 1226 xfrm_hash_ptrs_get(net, &state_ptrs); 1227 1228 x = __xfrm_state_lookup(&state_ptrs, mark, daddr, spi, proto, family); 1229 1230 if (x && x->km.state == XFRM_STATE_VALID) { 1231 spin_lock_bh(&net->xfrm.xfrm_state_lock); 1232 if (hlist_unhashed(&x->state_cache_input)) { 1233 hlist_add_head_rcu(&x->state_cache_input, state_cache_input); 1234 } else { 1235 hlist_del_rcu(&x->state_cache_input); 1236 hlist_add_head_rcu(&x->state_cache_input, state_cache_input); 1237 } 1238 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 1239 } 1240 1241 out: 1242 rcu_read_unlock(); 1243 return x; 1244 } 1245 EXPORT_SYMBOL(xfrm_input_state_lookup); 1246 1247 static struct xfrm_state *__xfrm_state_lookup_byaddr(const struct xfrm_hash_state_ptrs *state_ptrs, 1248 u32 mark, 1249 const xfrm_address_t *daddr, 1250 const xfrm_address_t *saddr, 1251 u8 proto, unsigned short family) 1252 { 1253 unsigned int h = __xfrm_src_hash(daddr, saddr, family, state_ptrs->hmask); 1254 struct xfrm_state *x; 1255 1256 hlist_for_each_entry_rcu(x, state_ptrs->bysrc + h, bysrc) { 1257 if (x->props.family != family || 1258 x->id.proto != proto || 1259 !xfrm_addr_equal(&x->id.daddr, daddr, family) || 1260 !xfrm_addr_equal(&x->props.saddr, saddr, family)) 1261 continue; 1262 1263 if ((mark & x->mark.m) != x->mark.v) 1264 continue; 1265 if (!xfrm_state_hold_rcu(x)) 1266 continue; 1267 return x; 1268 } 1269 1270 return NULL; 1271 } 1272 1273 static inline struct xfrm_state * 1274 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family) 1275 { 1276 struct xfrm_hash_state_ptrs state_ptrs; 1277 struct net *net = xs_net(x); 1278 u32 mark = x->mark.v & x->mark.m; 1279 1280 xfrm_hash_ptrs_get(net, &state_ptrs); 1281 1282 if (use_spi) 1283 return __xfrm_state_lookup(&state_ptrs, mark, &x->id.daddr, 1284 x->id.spi, x->id.proto, family); 1285 else 1286 return __xfrm_state_lookup_byaddr(&state_ptrs, mark, 1287 &x->id.daddr, 1288 &x->props.saddr, 1289 x->id.proto, family); 1290 } 1291 1292 static void xfrm_hash_grow_check(struct net *net, int have_hash_collision) 1293 { 1294 if (have_hash_collision && 1295 (net->xfrm.state_hmask + 1) < xfrm_state_hashmax && 1296 net->xfrm.state_num > net->xfrm.state_hmask) 1297 schedule_work(&net->xfrm.state_hash_work); 1298 } 1299 1300 static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x, 1301 const struct flowi *fl, unsigned short family, 1302 struct xfrm_state **best, int *acq_in_progress, 1303 int *error, unsigned int pcpu_id) 1304 { 1305 /* Resolution logic: 1306 * 1. There is a valid state with matching selector. Done. 1307 * 2. Valid state with inappropriate selector. Skip. 1308 * 1309 * Entering area of "sysdeps". 1310 * 1311 * 3. If state is not valid, selector is temporary, it selects 1312 * only session which triggered previous resolution. Key 1313 * manager will do something to install a state with proper 1314 * selector. 1315 */ 1316 if (x->km.state == XFRM_STATE_VALID) { 1317 if ((x->sel.family && 1318 (x->sel.family != family || 1319 !xfrm_selector_match(&x->sel, fl, family))) || 1320 !security_xfrm_state_pol_flow_match(x, pol, 1321 &fl->u.__fl_common)) 1322 return; 1323 1324 if (x->pcpu_num != UINT_MAX && x->pcpu_num != pcpu_id) 1325 return; 1326 1327 if (!*best || 1328 ((*best)->pcpu_num == UINT_MAX && x->pcpu_num == pcpu_id) || 1329 (*best)->km.dying > x->km.dying || 1330 ((*best)->km.dying == x->km.dying && 1331 (*best)->curlft.add_time < x->curlft.add_time)) 1332 *best = x; 1333 } else if (x->km.state == XFRM_STATE_ACQ) { 1334 if (!*best || x->pcpu_num == pcpu_id) 1335 *acq_in_progress = 1; 1336 } else if (x->km.state == XFRM_STATE_ERROR || 1337 x->km.state == XFRM_STATE_EXPIRED) { 1338 if ((!x->sel.family || 1339 (x->sel.family == family && 1340 xfrm_selector_match(&x->sel, fl, family))) && 1341 security_xfrm_state_pol_flow_match(x, pol, 1342 &fl->u.__fl_common)) 1343 *error = -ESRCH; 1344 } 1345 } 1346 1347 struct xfrm_state * 1348 xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, 1349 const struct flowi *fl, struct xfrm_tmpl *tmpl, 1350 struct xfrm_policy *pol, int *err, 1351 unsigned short family, u32 if_id) 1352 { 1353 static xfrm_address_t saddr_wildcard = { }; 1354 struct xfrm_hash_state_ptrs state_ptrs; 1355 struct net *net = xp_net(pol); 1356 unsigned int h, h_wildcard; 1357 struct xfrm_state *x, *x0, *to_put; 1358 int acquire_in_progress = 0; 1359 int error = 0; 1360 struct xfrm_state *best = NULL; 1361 u32 mark = pol->mark.v & pol->mark.m; 1362 unsigned short encap_family = tmpl->encap_family; 1363 unsigned int sequence; 1364 struct km_event c; 1365 unsigned int pcpu_id; 1366 bool cached = false; 1367 1368 /* We need the cpu id just as a lookup key, 1369 * we don't require it to be stable. 1370 */ 1371 pcpu_id = raw_smp_processor_id(); 1372 1373 to_put = NULL; 1374 1375 sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation); 1376 1377 rcu_read_lock(); 1378 xfrm_hash_ptrs_get(net, &state_ptrs); 1379 1380 hlist_for_each_entry_rcu(x, &pol->state_cache_list, state_cache) { 1381 if (x->props.family == encap_family && 1382 x->props.reqid == tmpl->reqid && 1383 (mark & x->mark.m) == x->mark.v && 1384 x->if_id == if_id && 1385 !(x->props.flags & XFRM_STATE_WILDRECV) && 1386 xfrm_state_addr_check(x, daddr, saddr, encap_family) && 1387 tmpl->mode == x->props.mode && 1388 tmpl->id.proto == x->id.proto && 1389 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) 1390 xfrm_state_look_at(pol, x, fl, encap_family, 1391 &best, &acquire_in_progress, &error, pcpu_id); 1392 } 1393 1394 if (best) 1395 goto cached; 1396 1397 hlist_for_each_entry_rcu(x, &pol->state_cache_list, state_cache) { 1398 if (x->props.family == encap_family && 1399 x->props.reqid == tmpl->reqid && 1400 (mark & x->mark.m) == x->mark.v && 1401 x->if_id == if_id && 1402 !(x->props.flags & XFRM_STATE_WILDRECV) && 1403 xfrm_addr_equal(&x->id.daddr, daddr, encap_family) && 1404 tmpl->mode == x->props.mode && 1405 tmpl->id.proto == x->id.proto && 1406 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) 1407 xfrm_state_look_at(pol, x, fl, family, 1408 &best, &acquire_in_progress, &error, pcpu_id); 1409 } 1410 1411 cached: 1412 cached = true; 1413 if (best) 1414 goto found; 1415 else if (error) 1416 best = NULL; 1417 else if (acquire_in_progress) /* XXX: acquire_in_progress should not happen */ 1418 WARN_ON(1); 1419 1420 h = __xfrm_dst_hash(daddr, saddr, tmpl->reqid, encap_family, state_ptrs.hmask); 1421 hlist_for_each_entry_rcu(x, state_ptrs.bydst + h, bydst) { 1422 #ifdef CONFIG_XFRM_OFFLOAD 1423 if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) { 1424 if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET) 1425 /* HW states are in the head of list, there is 1426 * no need to iterate further. 1427 */ 1428 break; 1429 1430 /* Packet offload: both policy and SA should 1431 * have same device. 1432 */ 1433 if (pol->xdo.dev != x->xso.dev) 1434 continue; 1435 } else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) 1436 /* Skip HW policy for SW lookups */ 1437 continue; 1438 #endif 1439 if (x->props.family == encap_family && 1440 x->props.reqid == tmpl->reqid && 1441 (mark & x->mark.m) == x->mark.v && 1442 x->if_id == if_id && 1443 !(x->props.flags & XFRM_STATE_WILDRECV) && 1444 xfrm_state_addr_check(x, daddr, saddr, encap_family) && 1445 tmpl->mode == x->props.mode && 1446 tmpl->id.proto == x->id.proto && 1447 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) 1448 xfrm_state_look_at(pol, x, fl, family, 1449 &best, &acquire_in_progress, &error, pcpu_id); 1450 } 1451 if (best || acquire_in_progress) 1452 goto found; 1453 1454 h_wildcard = __xfrm_dst_hash(daddr, &saddr_wildcard, tmpl->reqid, 1455 encap_family, state_ptrs.hmask); 1456 hlist_for_each_entry_rcu(x, state_ptrs.bydst + h_wildcard, bydst) { 1457 #ifdef CONFIG_XFRM_OFFLOAD 1458 if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) { 1459 if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET) 1460 /* HW states are in the head of list, there is 1461 * no need to iterate further. 1462 */ 1463 break; 1464 1465 /* Packet offload: both policy and SA should 1466 * have same device. 1467 */ 1468 if (pol->xdo.dev != x->xso.dev) 1469 continue; 1470 } else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) 1471 /* Skip HW policy for SW lookups */ 1472 continue; 1473 #endif 1474 if (x->props.family == encap_family && 1475 x->props.reqid == tmpl->reqid && 1476 (mark & x->mark.m) == x->mark.v && 1477 x->if_id == if_id && 1478 !(x->props.flags & XFRM_STATE_WILDRECV) && 1479 xfrm_addr_equal(&x->id.daddr, daddr, encap_family) && 1480 tmpl->mode == x->props.mode && 1481 tmpl->id.proto == x->id.proto && 1482 (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) 1483 xfrm_state_look_at(pol, x, fl, family, 1484 &best, &acquire_in_progress, &error, pcpu_id); 1485 } 1486 1487 found: 1488 if (!(pol->flags & XFRM_POLICY_CPU_ACQUIRE) || 1489 (best && (best->pcpu_num == pcpu_id))) 1490 x = best; 1491 1492 if (!x && !error && !acquire_in_progress) { 1493 if (tmpl->id.spi && 1494 (x0 = __xfrm_state_lookup_all(&state_ptrs, mark, daddr, 1495 tmpl->id.spi, tmpl->id.proto, 1496 encap_family, 1497 &pol->xdo)) != NULL) { 1498 to_put = x0; 1499 error = -EEXIST; 1500 goto out; 1501 } 1502 1503 c.net = net; 1504 /* If the KMs have no listeners (yet...), avoid allocating an SA 1505 * for each and every packet - garbage collection might not 1506 * handle the flood. 1507 */ 1508 if (!km_is_alive(&c)) { 1509 error = -ESRCH; 1510 goto out; 1511 } 1512 1513 x = xfrm_state_alloc(net); 1514 if (x == NULL) { 1515 error = -ENOMEM; 1516 goto out; 1517 } 1518 /* Initialize temporary state matching only 1519 * to current session. */ 1520 xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family); 1521 memcpy(&x->mark, &pol->mark, sizeof(x->mark)); 1522 x->if_id = if_id; 1523 if ((pol->flags & XFRM_POLICY_CPU_ACQUIRE) && best) 1524 x->pcpu_num = pcpu_id; 1525 1526 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->flowi_secid); 1527 if (error) { 1528 x->km.state = XFRM_STATE_DEAD; 1529 to_put = x; 1530 x = NULL; 1531 goto out; 1532 } 1533 #ifdef CONFIG_XFRM_OFFLOAD 1534 if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) { 1535 struct xfrm_dev_offload *xdo = &pol->xdo; 1536 struct xfrm_dev_offload *xso = &x->xso; 1537 struct net_device *dev = xdo->dev; 1538 1539 xso->type = XFRM_DEV_OFFLOAD_PACKET; 1540 xso->dir = xdo->dir; 1541 xso->dev = dev; 1542 xso->flags = XFRM_DEV_OFFLOAD_FLAG_ACQ; 1543 netdev_hold(dev, &xso->dev_tracker, GFP_ATOMIC); 1544 error = dev->xfrmdev_ops->xdo_dev_state_add(dev, x, 1545 NULL); 1546 if (error) { 1547 xso->dir = 0; 1548 netdev_put(dev, &xso->dev_tracker); 1549 xso->dev = NULL; 1550 xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED; 1551 x->km.state = XFRM_STATE_DEAD; 1552 to_put = x; 1553 x = NULL; 1554 goto out; 1555 } 1556 } 1557 #endif 1558 if (km_query(x, tmpl, pol) == 0) { 1559 spin_lock_bh(&net->xfrm.xfrm_state_lock); 1560 x->km.state = XFRM_STATE_ACQ; 1561 x->dir = XFRM_SA_DIR_OUT; 1562 list_add(&x->km.all, &net->xfrm.state_all); 1563 h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family); 1564 XFRM_STATE_INSERT(bydst, &x->bydst, 1565 net->xfrm.state_bydst + h, 1566 x->xso.type); 1567 h = xfrm_src_hash(net, daddr, saddr, encap_family); 1568 XFRM_STATE_INSERT(bysrc, &x->bysrc, 1569 net->xfrm.state_bysrc + h, 1570 x->xso.type); 1571 INIT_HLIST_NODE(&x->state_cache); 1572 if (x->id.spi) { 1573 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family); 1574 XFRM_STATE_INSERT(byspi, &x->byspi, 1575 net->xfrm.state_byspi + h, 1576 x->xso.type); 1577 } 1578 if (x->km.seq) { 1579 h = xfrm_seq_hash(net, x->km.seq); 1580 XFRM_STATE_INSERT(byseq, &x->byseq, 1581 net->xfrm.state_byseq + h, 1582 x->xso.type); 1583 } 1584 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires; 1585 hrtimer_start(&x->mtimer, 1586 ktime_set(net->xfrm.sysctl_acq_expires, 0), 1587 HRTIMER_MODE_REL_SOFT); 1588 net->xfrm.state_num++; 1589 xfrm_hash_grow_check(net, x->bydst.next != NULL); 1590 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 1591 } else { 1592 #ifdef CONFIG_XFRM_OFFLOAD 1593 struct xfrm_dev_offload *xso = &x->xso; 1594 1595 if (xso->type == XFRM_DEV_OFFLOAD_PACKET) { 1596 xfrm_dev_state_delete(x); 1597 xfrm_dev_state_free(x); 1598 } 1599 #endif 1600 x->km.state = XFRM_STATE_DEAD; 1601 to_put = x; 1602 x = NULL; 1603 error = -ESRCH; 1604 } 1605 1606 /* Use the already installed 'fallback' while the CPU-specific 1607 * SA acquire is handled*/ 1608 if (best) 1609 x = best; 1610 } 1611 out: 1612 if (x) { 1613 if (!xfrm_state_hold_rcu(x)) { 1614 *err = -EAGAIN; 1615 x = NULL; 1616 } 1617 } else { 1618 *err = acquire_in_progress ? -EAGAIN : error; 1619 } 1620 1621 if (x && x->km.state == XFRM_STATE_VALID && !cached && 1622 (!(pol->flags & XFRM_POLICY_CPU_ACQUIRE) || x->pcpu_num == pcpu_id)) { 1623 spin_lock_bh(&net->xfrm.xfrm_state_lock); 1624 if (hlist_unhashed(&x->state_cache)) 1625 hlist_add_head_rcu(&x->state_cache, &pol->state_cache_list); 1626 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 1627 } 1628 1629 rcu_read_unlock(); 1630 if (to_put) 1631 xfrm_state_put(to_put); 1632 1633 if (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence)) { 1634 *err = -EAGAIN; 1635 if (x) { 1636 xfrm_state_put(x); 1637 x = NULL; 1638 } 1639 } 1640 1641 return x; 1642 } 1643 1644 struct xfrm_state * 1645 xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id, 1646 xfrm_address_t *daddr, xfrm_address_t *saddr, 1647 unsigned short family, u8 mode, u8 proto, u32 reqid) 1648 { 1649 unsigned int h; 1650 struct xfrm_state *rx = NULL, *x = NULL; 1651 1652 spin_lock_bh(&net->xfrm.xfrm_state_lock); 1653 h = xfrm_dst_hash(net, daddr, saddr, reqid, family); 1654 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) { 1655 if (x->props.family == family && 1656 x->props.reqid == reqid && 1657 (mark & x->mark.m) == x->mark.v && 1658 x->if_id == if_id && 1659 !(x->props.flags & XFRM_STATE_WILDRECV) && 1660 xfrm_state_addr_check(x, daddr, saddr, family) && 1661 mode == x->props.mode && 1662 proto == x->id.proto && 1663 x->km.state == XFRM_STATE_VALID) { 1664 rx = x; 1665 break; 1666 } 1667 } 1668 1669 if (rx) 1670 xfrm_state_hold(rx); 1671 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 1672 1673 1674 return rx; 1675 } 1676 EXPORT_SYMBOL(xfrm_stateonly_find); 1677 1678 struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi, 1679 unsigned short family) 1680 { 1681 struct xfrm_state *x; 1682 struct xfrm_state_walk *w; 1683 1684 spin_lock_bh(&net->xfrm.xfrm_state_lock); 1685 list_for_each_entry(w, &net->xfrm.state_all, all) { 1686 x = container_of(w, struct xfrm_state, km); 1687 if (x->props.family != family || 1688 x->id.spi != spi) 1689 continue; 1690 1691 xfrm_state_hold(x); 1692 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 1693 return x; 1694 } 1695 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 1696 return NULL; 1697 } 1698 EXPORT_SYMBOL(xfrm_state_lookup_byspi); 1699 1700 static void __xfrm_state_insert(struct xfrm_state *x) 1701 { 1702 struct net *net = xs_net(x); 1703 unsigned int h; 1704 1705 list_add(&x->km.all, &net->xfrm.state_all); 1706 1707 /* Sanitize mark before store */ 1708 x->mark.v &= x->mark.m; 1709 1710 h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr, 1711 x->props.reqid, x->props.family); 1712 XFRM_STATE_INSERT(bydst, &x->bydst, net->xfrm.state_bydst + h, 1713 x->xso.type); 1714 1715 h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family); 1716 XFRM_STATE_INSERT(bysrc, &x->bysrc, net->xfrm.state_bysrc + h, 1717 x->xso.type); 1718 1719 if (x->id.spi) { 1720 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, 1721 x->props.family); 1722 1723 XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h, 1724 x->xso.type); 1725 } 1726 1727 if (x->km.seq) { 1728 h = xfrm_seq_hash(net, x->km.seq); 1729 1730 XFRM_STATE_INSERT(byseq, &x->byseq, net->xfrm.state_byseq + h, 1731 x->xso.type); 1732 } 1733 1734 hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT); 1735 if (x->replay_maxage) 1736 mod_timer(&x->rtimer, jiffies + x->replay_maxage); 1737 1738 net->xfrm.state_num++; 1739 1740 xfrm_hash_grow_check(net, x->bydst.next != NULL); 1741 xfrm_nat_keepalive_state_updated(x); 1742 } 1743 1744 /* net->xfrm.xfrm_state_lock is held */ 1745 static void __xfrm_state_bump_genids(struct xfrm_state *xnew) 1746 { 1747 struct net *net = xs_net(xnew); 1748 unsigned short family = xnew->props.family; 1749 u32 reqid = xnew->props.reqid; 1750 struct xfrm_state *x; 1751 unsigned int h; 1752 u32 mark = xnew->mark.v & xnew->mark.m; 1753 u32 if_id = xnew->if_id; 1754 u32 cpu_id = xnew->pcpu_num; 1755 1756 h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family); 1757 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) { 1758 if (x->props.family == family && 1759 x->props.reqid == reqid && 1760 x->if_id == if_id && 1761 x->pcpu_num == cpu_id && 1762 (mark & x->mark.m) == x->mark.v && 1763 xfrm_addr_equal(&x->id.daddr, &xnew->id.daddr, family) && 1764 xfrm_addr_equal(&x->props.saddr, &xnew->props.saddr, family)) 1765 x->genid++; 1766 } 1767 } 1768 1769 void xfrm_state_insert(struct xfrm_state *x) 1770 { 1771 struct net *net = xs_net(x); 1772 1773 spin_lock_bh(&net->xfrm.xfrm_state_lock); 1774 __xfrm_state_bump_genids(x); 1775 __xfrm_state_insert(x); 1776 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 1777 } 1778 EXPORT_SYMBOL(xfrm_state_insert); 1779 1780 /* net->xfrm.xfrm_state_lock is held */ 1781 static struct xfrm_state *__find_acq_core(struct net *net, 1782 const struct xfrm_mark *m, 1783 unsigned short family, u8 mode, 1784 u32 reqid, u32 if_id, u32 pcpu_num, u8 proto, 1785 const xfrm_address_t *daddr, 1786 const xfrm_address_t *saddr, 1787 int create) 1788 { 1789 unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family); 1790 struct xfrm_state *x; 1791 u32 mark = m->v & m->m; 1792 1793 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) { 1794 if (x->props.reqid != reqid || 1795 x->props.mode != mode || 1796 x->props.family != family || 1797 x->km.state != XFRM_STATE_ACQ || 1798 x->id.spi != 0 || 1799 x->id.proto != proto || 1800 (mark & x->mark.m) != x->mark.v || 1801 x->pcpu_num != pcpu_num || 1802 !xfrm_addr_equal(&x->id.daddr, daddr, family) || 1803 !xfrm_addr_equal(&x->props.saddr, saddr, family)) 1804 continue; 1805 1806 xfrm_state_hold(x); 1807 return x; 1808 } 1809 1810 if (!create) 1811 return NULL; 1812 1813 x = xfrm_state_alloc(net); 1814 if (likely(x)) { 1815 switch (family) { 1816 case AF_INET: 1817 x->sel.daddr.a4 = daddr->a4; 1818 x->sel.saddr.a4 = saddr->a4; 1819 x->sel.prefixlen_d = 32; 1820 x->sel.prefixlen_s = 32; 1821 x->props.saddr.a4 = saddr->a4; 1822 x->id.daddr.a4 = daddr->a4; 1823 break; 1824 1825 case AF_INET6: 1826 x->sel.daddr.in6 = daddr->in6; 1827 x->sel.saddr.in6 = saddr->in6; 1828 x->sel.prefixlen_d = 128; 1829 x->sel.prefixlen_s = 128; 1830 x->props.saddr.in6 = saddr->in6; 1831 x->id.daddr.in6 = daddr->in6; 1832 break; 1833 } 1834 1835 x->pcpu_num = pcpu_num; 1836 x->km.state = XFRM_STATE_ACQ; 1837 x->id.proto = proto; 1838 x->props.family = family; 1839 x->props.mode = mode; 1840 x->props.reqid = reqid; 1841 x->if_id = if_id; 1842 x->mark.v = m->v; 1843 x->mark.m = m->m; 1844 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires; 1845 xfrm_state_hold(x); 1846 hrtimer_start(&x->mtimer, 1847 ktime_set(net->xfrm.sysctl_acq_expires, 0), 1848 HRTIMER_MODE_REL_SOFT); 1849 list_add(&x->km.all, &net->xfrm.state_all); 1850 XFRM_STATE_INSERT(bydst, &x->bydst, net->xfrm.state_bydst + h, 1851 x->xso.type); 1852 h = xfrm_src_hash(net, daddr, saddr, family); 1853 XFRM_STATE_INSERT(bysrc, &x->bysrc, net->xfrm.state_bysrc + h, 1854 x->xso.type); 1855 1856 net->xfrm.state_num++; 1857 1858 xfrm_hash_grow_check(net, x->bydst.next != NULL); 1859 } 1860 1861 return x; 1862 } 1863 1864 static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num); 1865 1866 int xfrm_state_add(struct xfrm_state *x) 1867 { 1868 struct net *net = xs_net(x); 1869 struct xfrm_state *x1, *to_put; 1870 int family; 1871 int err; 1872 u32 mark = x->mark.v & x->mark.m; 1873 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY); 1874 1875 family = x->props.family; 1876 1877 to_put = NULL; 1878 1879 spin_lock_bh(&net->xfrm.xfrm_state_lock); 1880 1881 x1 = __xfrm_state_locate(x, use_spi, family); 1882 if (x1) { 1883 to_put = x1; 1884 x1 = NULL; 1885 err = -EEXIST; 1886 goto out; 1887 } 1888 1889 if (use_spi && x->km.seq) { 1890 x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq, x->pcpu_num); 1891 if (x1 && ((x1->id.proto != x->id.proto) || 1892 !xfrm_addr_equal(&x1->id.daddr, &x->id.daddr, family))) { 1893 to_put = x1; 1894 x1 = NULL; 1895 } 1896 } 1897 1898 if (use_spi && !x1) 1899 x1 = __find_acq_core(net, &x->mark, family, x->props.mode, 1900 x->props.reqid, x->if_id, x->pcpu_num, x->id.proto, 1901 &x->id.daddr, &x->props.saddr, 0); 1902 1903 __xfrm_state_bump_genids(x); 1904 __xfrm_state_insert(x); 1905 err = 0; 1906 1907 out: 1908 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 1909 1910 if (x1) { 1911 xfrm_state_delete(x1); 1912 xfrm_state_put(x1); 1913 } 1914 1915 if (to_put) 1916 xfrm_state_put(to_put); 1917 1918 return err; 1919 } 1920 EXPORT_SYMBOL(xfrm_state_add); 1921 1922 #ifdef CONFIG_XFRM_MIGRATE 1923 static inline int clone_security(struct xfrm_state *x, struct xfrm_sec_ctx *security) 1924 { 1925 struct xfrm_user_sec_ctx *uctx; 1926 int size = sizeof(*uctx) + security->ctx_len; 1927 int err; 1928 1929 uctx = kmalloc(size, GFP_KERNEL); 1930 if (!uctx) 1931 return -ENOMEM; 1932 1933 uctx->exttype = XFRMA_SEC_CTX; 1934 uctx->len = size; 1935 uctx->ctx_doi = security->ctx_doi; 1936 uctx->ctx_alg = security->ctx_alg; 1937 uctx->ctx_len = security->ctx_len; 1938 memcpy(uctx + 1, security->ctx_str, security->ctx_len); 1939 err = security_xfrm_state_alloc(x, uctx); 1940 kfree(uctx); 1941 if (err) 1942 return err; 1943 1944 return 0; 1945 } 1946 1947 static struct xfrm_state *xfrm_state_clone_and_setup(struct xfrm_state *orig, 1948 struct xfrm_encap_tmpl *encap, 1949 struct xfrm_migrate *m) 1950 { 1951 struct net *net = xs_net(orig); 1952 struct xfrm_state *x = xfrm_state_alloc(net); 1953 if (!x) 1954 goto out; 1955 1956 memcpy(&x->id, &orig->id, sizeof(x->id)); 1957 memcpy(&x->sel, &orig->sel, sizeof(x->sel)); 1958 memcpy(&x->lft, &orig->lft, sizeof(x->lft)); 1959 x->props.mode = orig->props.mode; 1960 x->props.replay_window = orig->props.replay_window; 1961 x->props.reqid = orig->props.reqid; 1962 x->props.family = orig->props.family; 1963 x->props.saddr = orig->props.saddr; 1964 1965 if (orig->aalg) { 1966 x->aalg = xfrm_algo_auth_clone(orig->aalg); 1967 if (!x->aalg) 1968 goto error; 1969 } 1970 x->props.aalgo = orig->props.aalgo; 1971 1972 if (orig->aead) { 1973 x->aead = xfrm_algo_aead_clone(orig->aead); 1974 x->geniv = orig->geniv; 1975 if (!x->aead) 1976 goto error; 1977 } 1978 if (orig->ealg) { 1979 x->ealg = xfrm_algo_clone(orig->ealg); 1980 if (!x->ealg) 1981 goto error; 1982 } 1983 x->props.ealgo = orig->props.ealgo; 1984 1985 if (orig->calg) { 1986 x->calg = xfrm_algo_clone(orig->calg); 1987 if (!x->calg) 1988 goto error; 1989 } 1990 x->props.calgo = orig->props.calgo; 1991 1992 if (encap || orig->encap) { 1993 if (encap) 1994 x->encap = kmemdup(encap, sizeof(*x->encap), 1995 GFP_KERNEL); 1996 else 1997 x->encap = kmemdup(orig->encap, sizeof(*x->encap), 1998 GFP_KERNEL); 1999 2000 if (!x->encap) 2001 goto error; 2002 } 2003 2004 if (orig->security) 2005 if (clone_security(x, orig->security)) 2006 goto error; 2007 2008 if (orig->coaddr) { 2009 x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr), 2010 GFP_KERNEL); 2011 if (!x->coaddr) 2012 goto error; 2013 } 2014 2015 if (orig->replay_esn) { 2016 if (xfrm_replay_clone(x, orig)) 2017 goto error; 2018 } 2019 2020 memcpy(&x->mark, &orig->mark, sizeof(x->mark)); 2021 memcpy(&x->props.smark, &orig->props.smark, sizeof(x->props.smark)); 2022 2023 x->props.flags = orig->props.flags; 2024 x->props.extra_flags = orig->props.extra_flags; 2025 2026 x->pcpu_num = orig->pcpu_num; 2027 x->if_id = orig->if_id; 2028 x->tfcpad = orig->tfcpad; 2029 x->replay_maxdiff = orig->replay_maxdiff; 2030 x->replay_maxage = orig->replay_maxage; 2031 memcpy(&x->curlft, &orig->curlft, sizeof(x->curlft)); 2032 x->km.state = orig->km.state; 2033 x->km.seq = orig->km.seq; 2034 x->replay = orig->replay; 2035 x->preplay = orig->preplay; 2036 x->mapping_maxage = orig->mapping_maxage; 2037 x->lastused = orig->lastused; 2038 x->new_mapping = 0; 2039 x->new_mapping_sport = 0; 2040 x->dir = orig->dir; 2041 2042 x->mode_cbs = orig->mode_cbs; 2043 if (x->mode_cbs && x->mode_cbs->clone_state) { 2044 if (x->mode_cbs->clone_state(x, orig)) 2045 goto error; 2046 } 2047 2048 2049 x->props.family = m->new_family; 2050 memcpy(&x->id.daddr, &m->new_daddr, sizeof(x->id.daddr)); 2051 memcpy(&x->props.saddr, &m->new_saddr, sizeof(x->props.saddr)); 2052 2053 return x; 2054 2055 error: 2056 xfrm_state_put(x); 2057 out: 2058 return NULL; 2059 } 2060 2061 struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net, 2062 u32 if_id) 2063 { 2064 unsigned int h; 2065 struct xfrm_state *x = NULL; 2066 2067 spin_lock_bh(&net->xfrm.xfrm_state_lock); 2068 2069 if (m->reqid) { 2070 h = xfrm_dst_hash(net, &m->old_daddr, &m->old_saddr, 2071 m->reqid, m->old_family); 2072 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) { 2073 if (x->props.mode != m->mode || 2074 x->id.proto != m->proto) 2075 continue; 2076 if (m->reqid && x->props.reqid != m->reqid) 2077 continue; 2078 if (if_id != 0 && x->if_id != if_id) 2079 continue; 2080 if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr, 2081 m->old_family) || 2082 !xfrm_addr_equal(&x->props.saddr, &m->old_saddr, 2083 m->old_family)) 2084 continue; 2085 xfrm_state_hold(x); 2086 break; 2087 } 2088 } else { 2089 h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr, 2090 m->old_family); 2091 hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) { 2092 if (x->props.mode != m->mode || 2093 x->id.proto != m->proto) 2094 continue; 2095 if (if_id != 0 && x->if_id != if_id) 2096 continue; 2097 if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr, 2098 m->old_family) || 2099 !xfrm_addr_equal(&x->props.saddr, &m->old_saddr, 2100 m->old_family)) 2101 continue; 2102 xfrm_state_hold(x); 2103 break; 2104 } 2105 } 2106 2107 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 2108 2109 return x; 2110 } 2111 EXPORT_SYMBOL(xfrm_migrate_state_find); 2112 2113 struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x, 2114 struct xfrm_migrate *m, 2115 struct xfrm_encap_tmpl *encap, 2116 struct net *net, 2117 struct xfrm_user_offload *xuo, 2118 struct netlink_ext_ack *extack) 2119 { 2120 struct xfrm_state *xc; 2121 2122 xc = xfrm_state_clone_and_setup(x, encap, m); 2123 if (!xc) 2124 return NULL; 2125 2126 if (xfrm_init_state(xc) < 0) 2127 goto error; 2128 2129 /* configure the hardware if offload is requested */ 2130 if (xuo && xfrm_dev_state_add(net, xc, xuo, extack)) 2131 goto error; 2132 2133 /* add state */ 2134 if (xfrm_addr_equal(&x->id.daddr, &m->new_daddr, m->new_family)) { 2135 /* a care is needed when the destination address of the 2136 state is to be updated as it is a part of triplet */ 2137 xfrm_state_insert(xc); 2138 } else { 2139 if (xfrm_state_add(xc) < 0) 2140 goto error; 2141 } 2142 2143 return xc; 2144 error: 2145 xfrm_state_put(xc); 2146 return NULL; 2147 } 2148 EXPORT_SYMBOL(xfrm_state_migrate); 2149 #endif 2150 2151 int xfrm_state_update(struct xfrm_state *x) 2152 { 2153 struct xfrm_state *x1, *to_put; 2154 int err; 2155 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY); 2156 struct net *net = xs_net(x); 2157 2158 to_put = NULL; 2159 2160 spin_lock_bh(&net->xfrm.xfrm_state_lock); 2161 x1 = __xfrm_state_locate(x, use_spi, x->props.family); 2162 2163 err = -ESRCH; 2164 if (!x1) 2165 goto out; 2166 2167 if (xfrm_state_kern(x1)) { 2168 to_put = x1; 2169 err = -EEXIST; 2170 goto out; 2171 } 2172 2173 if (x1->km.state == XFRM_STATE_ACQ) { 2174 if (x->dir && x1->dir != x->dir) 2175 goto out; 2176 2177 __xfrm_state_insert(x); 2178 x = NULL; 2179 } else { 2180 if (x1->dir != x->dir) 2181 goto out; 2182 } 2183 err = 0; 2184 2185 out: 2186 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 2187 2188 if (to_put) 2189 xfrm_state_put(to_put); 2190 2191 if (err) 2192 return err; 2193 2194 if (!x) { 2195 xfrm_state_delete(x1); 2196 xfrm_state_put(x1); 2197 return 0; 2198 } 2199 2200 err = -EINVAL; 2201 spin_lock_bh(&x1->lock); 2202 if (likely(x1->km.state == XFRM_STATE_VALID)) { 2203 if (x->encap && x1->encap && 2204 x->encap->encap_type == x1->encap->encap_type) 2205 memcpy(x1->encap, x->encap, sizeof(*x1->encap)); 2206 else if (x->encap || x1->encap) 2207 goto fail; 2208 2209 if (x->coaddr && x1->coaddr) { 2210 memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr)); 2211 } 2212 if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel))) 2213 memcpy(&x1->sel, &x->sel, sizeof(x1->sel)); 2214 memcpy(&x1->lft, &x->lft, sizeof(x1->lft)); 2215 x1->km.dying = 0; 2216 2217 hrtimer_start(&x1->mtimer, ktime_set(1, 0), 2218 HRTIMER_MODE_REL_SOFT); 2219 if (READ_ONCE(x1->curlft.use_time)) 2220 xfrm_state_check_expire(x1); 2221 2222 if (x->props.smark.m || x->props.smark.v || x->if_id) { 2223 spin_lock_bh(&net->xfrm.xfrm_state_lock); 2224 2225 if (x->props.smark.m || x->props.smark.v) 2226 x1->props.smark = x->props.smark; 2227 2228 if (x->if_id) 2229 x1->if_id = x->if_id; 2230 2231 __xfrm_state_bump_genids(x1); 2232 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 2233 } 2234 2235 err = 0; 2236 x->km.state = XFRM_STATE_DEAD; 2237 __xfrm_state_put(x); 2238 } 2239 2240 fail: 2241 spin_unlock_bh(&x1->lock); 2242 2243 xfrm_state_put(x1); 2244 2245 return err; 2246 } 2247 EXPORT_SYMBOL(xfrm_state_update); 2248 2249 int xfrm_state_check_expire(struct xfrm_state *x) 2250 { 2251 xfrm_dev_state_update_stats(x); 2252 2253 if (!READ_ONCE(x->curlft.use_time)) 2254 WRITE_ONCE(x->curlft.use_time, ktime_get_real_seconds()); 2255 2256 if (x->curlft.bytes >= x->lft.hard_byte_limit || 2257 x->curlft.packets >= x->lft.hard_packet_limit) { 2258 x->km.state = XFRM_STATE_EXPIRED; 2259 hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL_SOFT); 2260 return -EINVAL; 2261 } 2262 2263 if (!x->km.dying && 2264 (x->curlft.bytes >= x->lft.soft_byte_limit || 2265 x->curlft.packets >= x->lft.soft_packet_limit)) { 2266 x->km.dying = 1; 2267 km_state_expired(x, 0, 0); 2268 } 2269 return 0; 2270 } 2271 EXPORT_SYMBOL(xfrm_state_check_expire); 2272 2273 void xfrm_state_update_stats(struct net *net) 2274 { 2275 struct xfrm_state *x; 2276 int i; 2277 2278 spin_lock_bh(&net->xfrm.xfrm_state_lock); 2279 for (i = 0; i <= net->xfrm.state_hmask; i++) { 2280 hlist_for_each_entry(x, net->xfrm.state_bydst + i, bydst) 2281 xfrm_dev_state_update_stats(x); 2282 } 2283 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 2284 } 2285 2286 struct xfrm_state * 2287 xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi, 2288 u8 proto, unsigned short family) 2289 { 2290 struct xfrm_hash_state_ptrs state_ptrs; 2291 struct xfrm_state *x; 2292 2293 rcu_read_lock(); 2294 xfrm_hash_ptrs_get(net, &state_ptrs); 2295 2296 x = __xfrm_state_lookup(&state_ptrs, mark, daddr, spi, proto, family); 2297 rcu_read_unlock(); 2298 return x; 2299 } 2300 EXPORT_SYMBOL(xfrm_state_lookup); 2301 2302 struct xfrm_state * 2303 xfrm_state_lookup_byaddr(struct net *net, u32 mark, 2304 const xfrm_address_t *daddr, const xfrm_address_t *saddr, 2305 u8 proto, unsigned short family) 2306 { 2307 struct xfrm_hash_state_ptrs state_ptrs; 2308 struct xfrm_state *x; 2309 2310 rcu_read_lock(); 2311 2312 xfrm_hash_ptrs_get(net, &state_ptrs); 2313 2314 x = __xfrm_state_lookup_byaddr(&state_ptrs, mark, daddr, saddr, proto, family); 2315 rcu_read_unlock(); 2316 return x; 2317 } 2318 EXPORT_SYMBOL(xfrm_state_lookup_byaddr); 2319 2320 struct xfrm_state * 2321 xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid, 2322 u32 if_id, u32 pcpu_num, u8 proto, const xfrm_address_t *daddr, 2323 const xfrm_address_t *saddr, int create, unsigned short family) 2324 { 2325 struct xfrm_state *x; 2326 2327 spin_lock_bh(&net->xfrm.xfrm_state_lock); 2328 x = __find_acq_core(net, mark, family, mode, reqid, if_id, pcpu_num, 2329 proto, daddr, saddr, create); 2330 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 2331 2332 return x; 2333 } 2334 EXPORT_SYMBOL(xfrm_find_acq); 2335 2336 #ifdef CONFIG_XFRM_SUB_POLICY 2337 #if IS_ENABLED(CONFIG_IPV6) 2338 /* distribution counting sort function for xfrm_state and xfrm_tmpl */ 2339 static void 2340 __xfrm6_sort(void **dst, void **src, int n, 2341 int (*cmp)(const void *p), int maxclass) 2342 { 2343 int count[XFRM_MAX_DEPTH] = { }; 2344 int class[XFRM_MAX_DEPTH]; 2345 int i; 2346 2347 for (i = 0; i < n; i++) { 2348 int c = cmp(src[i]); 2349 2350 class[i] = c; 2351 count[c]++; 2352 } 2353 2354 for (i = 2; i < maxclass; i++) 2355 count[i] += count[i - 1]; 2356 2357 for (i = 0; i < n; i++) { 2358 dst[count[class[i] - 1]++] = src[i]; 2359 src[i] = NULL; 2360 } 2361 } 2362 2363 /* Rule for xfrm_state: 2364 * 2365 * rule 1: select IPsec transport except AH 2366 * rule 2: select MIPv6 RO or inbound trigger 2367 * rule 3: select IPsec transport AH 2368 * rule 4: select IPsec tunnel 2369 * rule 5: others 2370 */ 2371 static int __xfrm6_state_sort_cmp(const void *p) 2372 { 2373 const struct xfrm_state *v = p; 2374 2375 switch (v->props.mode) { 2376 case XFRM_MODE_TRANSPORT: 2377 if (v->id.proto != IPPROTO_AH) 2378 return 1; 2379 else 2380 return 3; 2381 #if IS_ENABLED(CONFIG_IPV6_MIP6) 2382 case XFRM_MODE_ROUTEOPTIMIZATION: 2383 case XFRM_MODE_IN_TRIGGER: 2384 return 2; 2385 #endif 2386 case XFRM_MODE_TUNNEL: 2387 case XFRM_MODE_BEET: 2388 case XFRM_MODE_IPTFS: 2389 return 4; 2390 } 2391 return 5; 2392 } 2393 2394 /* Rule for xfrm_tmpl: 2395 * 2396 * rule 1: select IPsec transport 2397 * rule 2: select MIPv6 RO or inbound trigger 2398 * rule 3: select IPsec tunnel 2399 * rule 4: others 2400 */ 2401 static int __xfrm6_tmpl_sort_cmp(const void *p) 2402 { 2403 const struct xfrm_tmpl *v = p; 2404 2405 switch (v->mode) { 2406 case XFRM_MODE_TRANSPORT: 2407 return 1; 2408 #if IS_ENABLED(CONFIG_IPV6_MIP6) 2409 case XFRM_MODE_ROUTEOPTIMIZATION: 2410 case XFRM_MODE_IN_TRIGGER: 2411 return 2; 2412 #endif 2413 case XFRM_MODE_TUNNEL: 2414 case XFRM_MODE_BEET: 2415 case XFRM_MODE_IPTFS: 2416 return 3; 2417 } 2418 return 4; 2419 } 2420 #else 2421 static inline int __xfrm6_state_sort_cmp(const void *p) { return 5; } 2422 static inline int __xfrm6_tmpl_sort_cmp(const void *p) { return 4; } 2423 2424 static inline void 2425 __xfrm6_sort(void **dst, void **src, int n, 2426 int (*cmp)(const void *p), int maxclass) 2427 { 2428 int i; 2429 2430 for (i = 0; i < n; i++) 2431 dst[i] = src[i]; 2432 } 2433 #endif /* CONFIG_IPV6 */ 2434 2435 void 2436 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n, 2437 unsigned short family) 2438 { 2439 int i; 2440 2441 if (family == AF_INET6) 2442 __xfrm6_sort((void **)dst, (void **)src, n, 2443 __xfrm6_tmpl_sort_cmp, 5); 2444 else 2445 for (i = 0; i < n; i++) 2446 dst[i] = src[i]; 2447 } 2448 2449 void 2450 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n, 2451 unsigned short family) 2452 { 2453 int i; 2454 2455 if (family == AF_INET6) 2456 __xfrm6_sort((void **)dst, (void **)src, n, 2457 __xfrm6_state_sort_cmp, 6); 2458 else 2459 for (i = 0; i < n; i++) 2460 dst[i] = src[i]; 2461 } 2462 #endif 2463 2464 /* Silly enough, but I'm lazy to build resolution list */ 2465 2466 static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num) 2467 { 2468 unsigned int h = xfrm_seq_hash(net, seq); 2469 struct xfrm_state *x; 2470 2471 hlist_for_each_entry_rcu(x, net->xfrm.state_byseq + h, byseq) { 2472 if (x->km.seq == seq && 2473 (mark & x->mark.m) == x->mark.v && 2474 x->pcpu_num == pcpu_num && 2475 x->km.state == XFRM_STATE_ACQ) { 2476 xfrm_state_hold(x); 2477 return x; 2478 } 2479 } 2480 2481 return NULL; 2482 } 2483 2484 struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num) 2485 { 2486 struct xfrm_state *x; 2487 2488 spin_lock_bh(&net->xfrm.xfrm_state_lock); 2489 x = __xfrm_find_acq_byseq(net, mark, seq, pcpu_num); 2490 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 2491 return x; 2492 } 2493 EXPORT_SYMBOL(xfrm_find_acq_byseq); 2494 2495 u32 xfrm_get_acqseq(void) 2496 { 2497 u32 res; 2498 static atomic_t acqseq; 2499 2500 do { 2501 res = atomic_inc_return(&acqseq); 2502 } while (!res); 2503 2504 return res; 2505 } 2506 EXPORT_SYMBOL(xfrm_get_acqseq); 2507 2508 int verify_spi_info(u8 proto, u32 min, u32 max, struct netlink_ext_ack *extack) 2509 { 2510 switch (proto) { 2511 case IPPROTO_AH: 2512 case IPPROTO_ESP: 2513 break; 2514 2515 case IPPROTO_COMP: 2516 /* IPCOMP spi is 16-bits. */ 2517 if (max >= 0x10000) { 2518 NL_SET_ERR_MSG(extack, "IPCOMP SPI must be <= 65535"); 2519 return -EINVAL; 2520 } 2521 break; 2522 2523 default: 2524 NL_SET_ERR_MSG(extack, "Invalid protocol, must be one of AH, ESP, IPCOMP"); 2525 return -EINVAL; 2526 } 2527 2528 if (min > max) { 2529 NL_SET_ERR_MSG(extack, "Invalid SPI range: min > max"); 2530 return -EINVAL; 2531 } 2532 2533 return 0; 2534 } 2535 EXPORT_SYMBOL(verify_spi_info); 2536 2537 int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high, 2538 struct netlink_ext_ack *extack) 2539 { 2540 struct net *net = xs_net(x); 2541 unsigned int h; 2542 struct xfrm_state *x0; 2543 int err = -ENOENT; 2544 __be32 minspi = htonl(low); 2545 __be32 maxspi = htonl(high); 2546 __be32 newspi = 0; 2547 u32 mark = x->mark.v & x->mark.m; 2548 2549 spin_lock_bh(&x->lock); 2550 if (x->km.state == XFRM_STATE_DEAD) { 2551 NL_SET_ERR_MSG(extack, "Target ACQUIRE is in DEAD state"); 2552 goto unlock; 2553 } 2554 2555 err = 0; 2556 if (x->id.spi) 2557 goto unlock; 2558 2559 err = -ENOENT; 2560 2561 if (minspi == maxspi) { 2562 x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family); 2563 if (x0) { 2564 NL_SET_ERR_MSG(extack, "Requested SPI is already in use"); 2565 xfrm_state_put(x0); 2566 goto unlock; 2567 } 2568 newspi = minspi; 2569 } else { 2570 u32 spi = 0; 2571 for (h = 0; h < high-low+1; h++) { 2572 spi = get_random_u32_inclusive(low, high); 2573 x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family); 2574 if (x0 == NULL) { 2575 newspi = htonl(spi); 2576 break; 2577 } 2578 xfrm_state_put(x0); 2579 } 2580 } 2581 if (newspi) { 2582 spin_lock_bh(&net->xfrm.xfrm_state_lock); 2583 x->id.spi = newspi; 2584 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family); 2585 XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h, 2586 x->xso.type); 2587 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 2588 2589 err = 0; 2590 } else { 2591 NL_SET_ERR_MSG(extack, "No SPI available in the requested range"); 2592 } 2593 2594 unlock: 2595 spin_unlock_bh(&x->lock); 2596 2597 return err; 2598 } 2599 EXPORT_SYMBOL(xfrm_alloc_spi); 2600 2601 static bool __xfrm_state_filter_match(struct xfrm_state *x, 2602 struct xfrm_address_filter *filter) 2603 { 2604 if (filter) { 2605 if ((filter->family == AF_INET || 2606 filter->family == AF_INET6) && 2607 x->props.family != filter->family) 2608 return false; 2609 2610 return addr_match(&x->props.saddr, &filter->saddr, 2611 filter->splen) && 2612 addr_match(&x->id.daddr, &filter->daddr, 2613 filter->dplen); 2614 } 2615 return true; 2616 } 2617 2618 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk, 2619 int (*func)(struct xfrm_state *, int, void*), 2620 void *data) 2621 { 2622 struct xfrm_state *state; 2623 struct xfrm_state_walk *x; 2624 int err = 0; 2625 2626 if (walk->seq != 0 && list_empty(&walk->all)) 2627 return 0; 2628 2629 spin_lock_bh(&net->xfrm.xfrm_state_lock); 2630 if (list_empty(&walk->all)) 2631 x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all); 2632 else 2633 x = list_first_entry(&walk->all, struct xfrm_state_walk, all); 2634 list_for_each_entry_from(x, &net->xfrm.state_all, all) { 2635 if (x->state == XFRM_STATE_DEAD) 2636 continue; 2637 state = container_of(x, struct xfrm_state, km); 2638 if (!xfrm_id_proto_match(state->id.proto, walk->proto)) 2639 continue; 2640 if (!__xfrm_state_filter_match(state, walk->filter)) 2641 continue; 2642 err = func(state, walk->seq, data); 2643 if (err) { 2644 list_move_tail(&walk->all, &x->all); 2645 goto out; 2646 } 2647 walk->seq++; 2648 } 2649 if (walk->seq == 0) { 2650 err = -ENOENT; 2651 goto out; 2652 } 2653 list_del_init(&walk->all); 2654 out: 2655 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 2656 return err; 2657 } 2658 EXPORT_SYMBOL(xfrm_state_walk); 2659 2660 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto, 2661 struct xfrm_address_filter *filter) 2662 { 2663 INIT_LIST_HEAD(&walk->all); 2664 walk->proto = proto; 2665 walk->state = XFRM_STATE_DEAD; 2666 walk->seq = 0; 2667 walk->filter = filter; 2668 } 2669 EXPORT_SYMBOL(xfrm_state_walk_init); 2670 2671 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net) 2672 { 2673 kfree(walk->filter); 2674 2675 if (list_empty(&walk->all)) 2676 return; 2677 2678 spin_lock_bh(&net->xfrm.xfrm_state_lock); 2679 list_del(&walk->all); 2680 spin_unlock_bh(&net->xfrm.xfrm_state_lock); 2681 } 2682 EXPORT_SYMBOL(xfrm_state_walk_done); 2683 2684 static void xfrm_replay_timer_handler(struct timer_list *t) 2685 { 2686 struct xfrm_state *x = timer_container_of(x, t, rtimer); 2687 2688 spin_lock(&x->lock); 2689 2690 if (x->km.state == XFRM_STATE_VALID) { 2691 if (xfrm_aevent_is_on(xs_net(x))) 2692 xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT); 2693 else 2694 x->xflags |= XFRM_TIME_DEFER; 2695 } 2696 2697 spin_unlock(&x->lock); 2698 } 2699 2700 static LIST_HEAD(xfrm_km_list); 2701 2702 void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c) 2703 { 2704 struct xfrm_mgr *km; 2705 2706 rcu_read_lock(); 2707 list_for_each_entry_rcu(km, &xfrm_km_list, list) 2708 if (km->notify_policy) 2709 km->notify_policy(xp, dir, c); 2710 rcu_read_unlock(); 2711 } 2712 2713 void km_state_notify(struct xfrm_state *x, const struct km_event *c) 2714 { 2715 struct xfrm_mgr *km; 2716 rcu_read_lock(); 2717 list_for_each_entry_rcu(km, &xfrm_km_list, list) 2718 if (km->notify) 2719 km->notify(x, c); 2720 rcu_read_unlock(); 2721 } 2722 2723 EXPORT_SYMBOL(km_policy_notify); 2724 EXPORT_SYMBOL(km_state_notify); 2725 2726 void km_state_expired(struct xfrm_state *x, int hard, u32 portid) 2727 { 2728 struct km_event c; 2729 2730 c.data.hard = hard; 2731 c.portid = portid; 2732 c.event = XFRM_MSG_EXPIRE; 2733 km_state_notify(x, &c); 2734 } 2735 2736 EXPORT_SYMBOL(km_state_expired); 2737 /* 2738 * We send to all registered managers regardless of failure 2739 * We are happy with one success 2740 */ 2741 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol) 2742 { 2743 int err = -EINVAL, acqret; 2744 struct xfrm_mgr *km; 2745 2746 rcu_read_lock(); 2747 list_for_each_entry_rcu(km, &xfrm_km_list, list) { 2748 acqret = km->acquire(x, t, pol); 2749 if (!acqret) 2750 err = acqret; 2751 } 2752 rcu_read_unlock(); 2753 return err; 2754 } 2755 EXPORT_SYMBOL(km_query); 2756 2757 static int __km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport) 2758 { 2759 int err = -EINVAL; 2760 struct xfrm_mgr *km; 2761 2762 rcu_read_lock(); 2763 list_for_each_entry_rcu(km, &xfrm_km_list, list) { 2764 if (km->new_mapping) 2765 err = km->new_mapping(x, ipaddr, sport); 2766 if (!err) 2767 break; 2768 } 2769 rcu_read_unlock(); 2770 return err; 2771 } 2772 2773 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport) 2774 { 2775 int ret = 0; 2776 2777 if (x->mapping_maxage) { 2778 if ((jiffies / HZ - x->new_mapping) > x->mapping_maxage || 2779 x->new_mapping_sport != sport) { 2780 x->new_mapping_sport = sport; 2781 x->new_mapping = jiffies / HZ; 2782 ret = __km_new_mapping(x, ipaddr, sport); 2783 } 2784 } else { 2785 ret = __km_new_mapping(x, ipaddr, sport); 2786 } 2787 2788 return ret; 2789 } 2790 EXPORT_SYMBOL(km_new_mapping); 2791 2792 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid) 2793 { 2794 struct km_event c; 2795 2796 c.data.hard = hard; 2797 c.portid = portid; 2798 c.event = XFRM_MSG_POLEXPIRE; 2799 km_policy_notify(pol, dir, &c); 2800 } 2801 EXPORT_SYMBOL(km_policy_expired); 2802 2803 #ifdef CONFIG_XFRM_MIGRATE 2804 int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, 2805 const struct xfrm_migrate *m, int num_migrate, 2806 const struct xfrm_kmaddress *k, 2807 const struct xfrm_encap_tmpl *encap) 2808 { 2809 int err = -EINVAL; 2810 int ret; 2811 struct xfrm_mgr *km; 2812 2813 rcu_read_lock(); 2814 list_for_each_entry_rcu(km, &xfrm_km_list, list) { 2815 if (km->migrate) { 2816 ret = km->migrate(sel, dir, type, m, num_migrate, k, 2817 encap); 2818 if (!ret) 2819 err = ret; 2820 } 2821 } 2822 rcu_read_unlock(); 2823 return err; 2824 } 2825 EXPORT_SYMBOL(km_migrate); 2826 #endif 2827 2828 int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr) 2829 { 2830 int err = -EINVAL; 2831 int ret; 2832 struct xfrm_mgr *km; 2833 2834 rcu_read_lock(); 2835 list_for_each_entry_rcu(km, &xfrm_km_list, list) { 2836 if (km->report) { 2837 ret = km->report(net, proto, sel, addr); 2838 if (!ret) 2839 err = ret; 2840 } 2841 } 2842 rcu_read_unlock(); 2843 return err; 2844 } 2845 EXPORT_SYMBOL(km_report); 2846 2847 static bool km_is_alive(const struct km_event *c) 2848 { 2849 struct xfrm_mgr *km; 2850 bool is_alive = false; 2851 2852 rcu_read_lock(); 2853 list_for_each_entry_rcu(km, &xfrm_km_list, list) { 2854 if (km->is_alive && km->is_alive(c)) { 2855 is_alive = true; 2856 break; 2857 } 2858 } 2859 rcu_read_unlock(); 2860 2861 return is_alive; 2862 } 2863 2864 #if IS_ENABLED(CONFIG_XFRM_USER_COMPAT) 2865 static DEFINE_SPINLOCK(xfrm_translator_lock); 2866 static struct xfrm_translator __rcu *xfrm_translator; 2867 2868 struct xfrm_translator *xfrm_get_translator(void) 2869 { 2870 struct xfrm_translator *xtr; 2871 2872 rcu_read_lock(); 2873 xtr = rcu_dereference(xfrm_translator); 2874 if (unlikely(!xtr)) 2875 goto out; 2876 if (!try_module_get(xtr->owner)) 2877 xtr = NULL; 2878 out: 2879 rcu_read_unlock(); 2880 return xtr; 2881 } 2882 EXPORT_SYMBOL_GPL(xfrm_get_translator); 2883 2884 void xfrm_put_translator(struct xfrm_translator *xtr) 2885 { 2886 module_put(xtr->owner); 2887 } 2888 EXPORT_SYMBOL_GPL(xfrm_put_translator); 2889 2890 int xfrm_register_translator(struct xfrm_translator *xtr) 2891 { 2892 int err = 0; 2893 2894 spin_lock_bh(&xfrm_translator_lock); 2895 if (unlikely(xfrm_translator != NULL)) 2896 err = -EEXIST; 2897 else 2898 rcu_assign_pointer(xfrm_translator, xtr); 2899 spin_unlock_bh(&xfrm_translator_lock); 2900 2901 return err; 2902 } 2903 EXPORT_SYMBOL_GPL(xfrm_register_translator); 2904 2905 int xfrm_unregister_translator(struct xfrm_translator *xtr) 2906 { 2907 int err = 0; 2908 2909 spin_lock_bh(&xfrm_translator_lock); 2910 if (likely(xfrm_translator != NULL)) { 2911 if (rcu_access_pointer(xfrm_translator) != xtr) 2912 err = -EINVAL; 2913 else 2914 RCU_INIT_POINTER(xfrm_translator, NULL); 2915 } 2916 spin_unlock_bh(&xfrm_translator_lock); 2917 synchronize_rcu(); 2918 2919 return err; 2920 } 2921 EXPORT_SYMBOL_GPL(xfrm_unregister_translator); 2922 #endif 2923 2924 int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval, int optlen) 2925 { 2926 int err; 2927 u8 *data; 2928 struct xfrm_mgr *km; 2929 struct xfrm_policy *pol = NULL; 2930 2931 if (sockptr_is_null(optval) && !optlen) { 2932 xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL); 2933 xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL); 2934 __sk_dst_reset(sk); 2935 return 0; 2936 } 2937 2938 if (optlen <= 0 || optlen > PAGE_SIZE) 2939 return -EMSGSIZE; 2940 2941 data = memdup_sockptr(optval, optlen); 2942 if (IS_ERR(data)) 2943 return PTR_ERR(data); 2944 2945 if (in_compat_syscall()) { 2946 struct xfrm_translator *xtr = xfrm_get_translator(); 2947 2948 if (!xtr) { 2949 kfree(data); 2950 return -EOPNOTSUPP; 2951 } 2952 2953 err = xtr->xlate_user_policy_sockptr(&data, optlen); 2954 xfrm_put_translator(xtr); 2955 if (err) { 2956 kfree(data); 2957 return err; 2958 } 2959 } 2960 2961 err = -EINVAL; 2962 rcu_read_lock(); 2963 list_for_each_entry_rcu(km, &xfrm_km_list, list) { 2964 pol = km->compile_policy(sk, optname, data, 2965 optlen, &err); 2966 if (err >= 0) 2967 break; 2968 } 2969 rcu_read_unlock(); 2970 2971 if (err >= 0) { 2972 xfrm_sk_policy_insert(sk, err, pol); 2973 xfrm_pol_put(pol); 2974 __sk_dst_reset(sk); 2975 err = 0; 2976 } 2977 2978 kfree(data); 2979 return err; 2980 } 2981 EXPORT_SYMBOL(xfrm_user_policy); 2982 2983 static DEFINE_SPINLOCK(xfrm_km_lock); 2984 2985 void xfrm_register_km(struct xfrm_mgr *km) 2986 { 2987 spin_lock_bh(&xfrm_km_lock); 2988 list_add_tail_rcu(&km->list, &xfrm_km_list); 2989 spin_unlock_bh(&xfrm_km_lock); 2990 } 2991 EXPORT_SYMBOL(xfrm_register_km); 2992 2993 void xfrm_unregister_km(struct xfrm_mgr *km) 2994 { 2995 spin_lock_bh(&xfrm_km_lock); 2996 list_del_rcu(&km->list); 2997 spin_unlock_bh(&xfrm_km_lock); 2998 synchronize_rcu(); 2999 } 3000 EXPORT_SYMBOL(xfrm_unregister_km); 3001 3002 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo) 3003 { 3004 int err = 0; 3005 3006 if (WARN_ON(afinfo->family >= NPROTO)) 3007 return -EAFNOSUPPORT; 3008 3009 spin_lock_bh(&xfrm_state_afinfo_lock); 3010 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL)) 3011 err = -EEXIST; 3012 else 3013 rcu_assign_pointer(xfrm_state_afinfo[afinfo->family], afinfo); 3014 spin_unlock_bh(&xfrm_state_afinfo_lock); 3015 return err; 3016 } 3017 EXPORT_SYMBOL(xfrm_state_register_afinfo); 3018 3019 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo) 3020 { 3021 int err = 0, family = afinfo->family; 3022 3023 if (WARN_ON(family >= NPROTO)) 3024 return -EAFNOSUPPORT; 3025 3026 spin_lock_bh(&xfrm_state_afinfo_lock); 3027 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) { 3028 if (rcu_access_pointer(xfrm_state_afinfo[family]) != afinfo) 3029 err = -EINVAL; 3030 else 3031 RCU_INIT_POINTER(xfrm_state_afinfo[afinfo->family], NULL); 3032 } 3033 spin_unlock_bh(&xfrm_state_afinfo_lock); 3034 synchronize_rcu(); 3035 return err; 3036 } 3037 EXPORT_SYMBOL(xfrm_state_unregister_afinfo); 3038 3039 struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family) 3040 { 3041 if (unlikely(family >= NPROTO)) 3042 return NULL; 3043 3044 return rcu_dereference(xfrm_state_afinfo[family]); 3045 } 3046 EXPORT_SYMBOL_GPL(xfrm_state_afinfo_get_rcu); 3047 3048 struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family) 3049 { 3050 struct xfrm_state_afinfo *afinfo; 3051 if (unlikely(family >= NPROTO)) 3052 return NULL; 3053 rcu_read_lock(); 3054 afinfo = rcu_dereference(xfrm_state_afinfo[family]); 3055 if (unlikely(!afinfo)) 3056 rcu_read_unlock(); 3057 return afinfo; 3058 } 3059 3060 void xfrm_flush_gc(void) 3061 { 3062 flush_work(&xfrm_state_gc_work); 3063 } 3064 EXPORT_SYMBOL(xfrm_flush_gc); 3065 3066 static void xfrm_state_delete_tunnel(struct xfrm_state *x) 3067 { 3068 if (x->tunnel) { 3069 struct xfrm_state *t = x->tunnel; 3070 3071 if (atomic_dec_return(&t->tunnel_users) == 1) 3072 xfrm_state_delete(t); 3073 xfrm_state_put(t); 3074 x->tunnel = NULL; 3075 } 3076 } 3077 3078 u32 xfrm_state_mtu(struct xfrm_state *x, int mtu) 3079 { 3080 const struct xfrm_type *type = READ_ONCE(x->type); 3081 struct crypto_aead *aead; 3082 u32 blksize, net_adj = 0; 3083 3084 if (x->km.state != XFRM_STATE_VALID || 3085 !type || type->proto != IPPROTO_ESP) 3086 return mtu - x->props.header_len; 3087 3088 aead = x->data; 3089 blksize = ALIGN(crypto_aead_blocksize(aead), 4); 3090 3091 switch (x->props.mode) { 3092 case XFRM_MODE_TRANSPORT: 3093 case XFRM_MODE_BEET: 3094 if (x->props.family == AF_INET) 3095 net_adj = sizeof(struct iphdr); 3096 else if (x->props.family == AF_INET6) 3097 net_adj = sizeof(struct ipv6hdr); 3098 break; 3099 case XFRM_MODE_TUNNEL: 3100 break; 3101 default: 3102 if (x->mode_cbs && x->mode_cbs->get_inner_mtu) 3103 return x->mode_cbs->get_inner_mtu(x, mtu); 3104 3105 WARN_ON_ONCE(1); 3106 break; 3107 } 3108 3109 return ((mtu - x->props.header_len - crypto_aead_authsize(aead) - 3110 net_adj) & ~(blksize - 1)) + net_adj - 2; 3111 } 3112 EXPORT_SYMBOL_GPL(xfrm_state_mtu); 3113 3114 int __xfrm_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack) 3115 { 3116 const struct xfrm_mode *inner_mode; 3117 const struct xfrm_mode *outer_mode; 3118 int family = x->props.family; 3119 int err; 3120 3121 if (family == AF_INET && 3122 READ_ONCE(xs_net(x)->ipv4.sysctl_ip_no_pmtu_disc)) 3123 x->props.flags |= XFRM_STATE_NOPMTUDISC; 3124 3125 err = -EPROTONOSUPPORT; 3126 3127 if (x->sel.family != AF_UNSPEC) { 3128 inner_mode = xfrm_get_mode(x->props.mode, x->sel.family); 3129 if (inner_mode == NULL) { 3130 NL_SET_ERR_MSG(extack, "Requested mode not found"); 3131 goto error; 3132 } 3133 3134 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) && 3135 family != x->sel.family) { 3136 NL_SET_ERR_MSG(extack, "Only tunnel modes can accommodate a change of family"); 3137 goto error; 3138 } 3139 3140 x->inner_mode = *inner_mode; 3141 } else { 3142 const struct xfrm_mode *inner_mode_iaf; 3143 int iafamily = AF_INET; 3144 3145 inner_mode = xfrm_get_mode(x->props.mode, x->props.family); 3146 if (inner_mode == NULL) { 3147 NL_SET_ERR_MSG(extack, "Requested mode not found"); 3148 goto error; 3149 } 3150 3151 x->inner_mode = *inner_mode; 3152 3153 if (x->props.family == AF_INET) 3154 iafamily = AF_INET6; 3155 3156 inner_mode_iaf = xfrm_get_mode(x->props.mode, iafamily); 3157 if (inner_mode_iaf) { 3158 if (inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL) 3159 x->inner_mode_iaf = *inner_mode_iaf; 3160 } 3161 } 3162 3163 x->type = xfrm_get_type(x->id.proto, family); 3164 if (x->type == NULL) { 3165 NL_SET_ERR_MSG(extack, "Requested type not found"); 3166 goto error; 3167 } 3168 3169 err = x->type->init_state(x, extack); 3170 if (err) 3171 goto error; 3172 3173 outer_mode = xfrm_get_mode(x->props.mode, family); 3174 if (!outer_mode) { 3175 NL_SET_ERR_MSG(extack, "Requested mode not found"); 3176 err = -EPROTONOSUPPORT; 3177 goto error; 3178 } 3179 3180 x->outer_mode = *outer_mode; 3181 if (x->nat_keepalive_interval) { 3182 if (x->dir != XFRM_SA_DIR_OUT) { 3183 NL_SET_ERR_MSG(extack, "NAT keepalive is only supported for outbound SAs"); 3184 err = -EINVAL; 3185 goto error; 3186 } 3187 3188 if (!x->encap || x->encap->encap_type != UDP_ENCAP_ESPINUDP) { 3189 NL_SET_ERR_MSG(extack, 3190 "NAT keepalive is only supported for UDP encapsulation"); 3191 err = -EINVAL; 3192 goto error; 3193 } 3194 } 3195 3196 x->mode_cbs = xfrm_get_mode_cbs(x->props.mode); 3197 if (x->mode_cbs) { 3198 if (x->mode_cbs->init_state) 3199 err = x->mode_cbs->init_state(x); 3200 module_put(x->mode_cbs->owner); 3201 } 3202 error: 3203 return err; 3204 } 3205 3206 EXPORT_SYMBOL(__xfrm_init_state); 3207 3208 int xfrm_init_state(struct xfrm_state *x) 3209 { 3210 int err; 3211 3212 err = __xfrm_init_state(x, NULL); 3213 if (err) 3214 return err; 3215 3216 err = xfrm_init_replay(x, NULL); 3217 if (err) 3218 return err; 3219 3220 x->km.state = XFRM_STATE_VALID; 3221 return 0; 3222 } 3223 3224 EXPORT_SYMBOL(xfrm_init_state); 3225 3226 int __net_init xfrm_state_init(struct net *net) 3227 { 3228 unsigned int sz; 3229 3230 if (net_eq(net, &init_net)) 3231 xfrm_state_cache = KMEM_CACHE(xfrm_state, 3232 SLAB_HWCACHE_ALIGN | SLAB_PANIC); 3233 3234 INIT_LIST_HEAD(&net->xfrm.state_all); 3235 3236 sz = sizeof(struct hlist_head) * 8; 3237 3238 net->xfrm.state_bydst = xfrm_hash_alloc(sz); 3239 if (!net->xfrm.state_bydst) 3240 goto out_bydst; 3241 net->xfrm.state_bysrc = xfrm_hash_alloc(sz); 3242 if (!net->xfrm.state_bysrc) 3243 goto out_bysrc; 3244 net->xfrm.state_byspi = xfrm_hash_alloc(sz); 3245 if (!net->xfrm.state_byspi) 3246 goto out_byspi; 3247 net->xfrm.state_byseq = xfrm_hash_alloc(sz); 3248 if (!net->xfrm.state_byseq) 3249 goto out_byseq; 3250 3251 net->xfrm.state_cache_input = alloc_percpu(struct hlist_head); 3252 if (!net->xfrm.state_cache_input) 3253 goto out_state_cache_input; 3254 3255 net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1); 3256 3257 net->xfrm.state_num = 0; 3258 INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize); 3259 spin_lock_init(&net->xfrm.xfrm_state_lock); 3260 seqcount_spinlock_init(&net->xfrm.xfrm_state_hash_generation, 3261 &net->xfrm.xfrm_state_lock); 3262 return 0; 3263 3264 out_state_cache_input: 3265 xfrm_hash_free(net->xfrm.state_byseq, sz); 3266 out_byseq: 3267 xfrm_hash_free(net->xfrm.state_byspi, sz); 3268 out_byspi: 3269 xfrm_hash_free(net->xfrm.state_bysrc, sz); 3270 out_bysrc: 3271 xfrm_hash_free(net->xfrm.state_bydst, sz); 3272 out_bydst: 3273 return -ENOMEM; 3274 } 3275 3276 void xfrm_state_fini(struct net *net) 3277 { 3278 unsigned int sz; 3279 3280 flush_work(&net->xfrm.state_hash_work); 3281 xfrm_state_flush(net, IPSEC_PROTO_ANY, false); 3282 flush_work(&xfrm_state_gc_work); 3283 3284 WARN_ON(!list_empty(&net->xfrm.state_all)); 3285 3286 sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head); 3287 WARN_ON(!hlist_empty(net->xfrm.state_byseq)); 3288 xfrm_hash_free(net->xfrm.state_byseq, sz); 3289 WARN_ON(!hlist_empty(net->xfrm.state_byspi)); 3290 xfrm_hash_free(net->xfrm.state_byspi, sz); 3291 WARN_ON(!hlist_empty(net->xfrm.state_bysrc)); 3292 xfrm_hash_free(net->xfrm.state_bysrc, sz); 3293 WARN_ON(!hlist_empty(net->xfrm.state_bydst)); 3294 xfrm_hash_free(net->xfrm.state_bydst, sz); 3295 free_percpu(net->xfrm.state_cache_input); 3296 } 3297 3298 #ifdef CONFIG_AUDITSYSCALL 3299 static void xfrm_audit_helper_sainfo(struct xfrm_state *x, 3300 struct audit_buffer *audit_buf) 3301 { 3302 struct xfrm_sec_ctx *ctx = x->security; 3303 u32 spi = ntohl(x->id.spi); 3304 3305 if (ctx) 3306 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s", 3307 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str); 3308 3309 switch (x->props.family) { 3310 case AF_INET: 3311 audit_log_format(audit_buf, " src=%pI4 dst=%pI4", 3312 &x->props.saddr.a4, &x->id.daddr.a4); 3313 break; 3314 case AF_INET6: 3315 audit_log_format(audit_buf, " src=%pI6 dst=%pI6", 3316 x->props.saddr.a6, x->id.daddr.a6); 3317 break; 3318 } 3319 3320 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi); 3321 } 3322 3323 static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family, 3324 struct audit_buffer *audit_buf) 3325 { 3326 const struct iphdr *iph4; 3327 const struct ipv6hdr *iph6; 3328 3329 switch (family) { 3330 case AF_INET: 3331 iph4 = ip_hdr(skb); 3332 audit_log_format(audit_buf, " src=%pI4 dst=%pI4", 3333 &iph4->saddr, &iph4->daddr); 3334 break; 3335 case AF_INET6: 3336 iph6 = ipv6_hdr(skb); 3337 audit_log_format(audit_buf, 3338 " src=%pI6 dst=%pI6 flowlbl=0x%x%02x%02x", 3339 &iph6->saddr, &iph6->daddr, 3340 iph6->flow_lbl[0] & 0x0f, 3341 iph6->flow_lbl[1], 3342 iph6->flow_lbl[2]); 3343 break; 3344 } 3345 } 3346 3347 void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid) 3348 { 3349 struct audit_buffer *audit_buf; 3350 3351 audit_buf = xfrm_audit_start("SAD-add"); 3352 if (audit_buf == NULL) 3353 return; 3354 xfrm_audit_helper_usrinfo(task_valid, audit_buf); 3355 xfrm_audit_helper_sainfo(x, audit_buf); 3356 audit_log_format(audit_buf, " res=%u", result); 3357 audit_log_end(audit_buf); 3358 } 3359 EXPORT_SYMBOL_GPL(xfrm_audit_state_add); 3360 3361 void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid) 3362 { 3363 struct audit_buffer *audit_buf; 3364 3365 audit_buf = xfrm_audit_start("SAD-delete"); 3366 if (audit_buf == NULL) 3367 return; 3368 xfrm_audit_helper_usrinfo(task_valid, audit_buf); 3369 xfrm_audit_helper_sainfo(x, audit_buf); 3370 audit_log_format(audit_buf, " res=%u", result); 3371 audit_log_end(audit_buf); 3372 } 3373 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete); 3374 3375 void xfrm_audit_state_replay_overflow(struct xfrm_state *x, 3376 struct sk_buff *skb) 3377 { 3378 struct audit_buffer *audit_buf; 3379 u32 spi; 3380 3381 audit_buf = xfrm_audit_start("SA-replay-overflow"); 3382 if (audit_buf == NULL) 3383 return; 3384 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf); 3385 /* don't record the sequence number because it's inherent in this kind 3386 * of audit message */ 3387 spi = ntohl(x->id.spi); 3388 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi); 3389 audit_log_end(audit_buf); 3390 } 3391 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow); 3392 3393 void xfrm_audit_state_replay(struct xfrm_state *x, 3394 struct sk_buff *skb, __be32 net_seq) 3395 { 3396 struct audit_buffer *audit_buf; 3397 u32 spi; 3398 3399 audit_buf = xfrm_audit_start("SA-replayed-pkt"); 3400 if (audit_buf == NULL) 3401 return; 3402 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf); 3403 spi = ntohl(x->id.spi); 3404 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u", 3405 spi, spi, ntohl(net_seq)); 3406 audit_log_end(audit_buf); 3407 } 3408 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay); 3409 3410 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family) 3411 { 3412 struct audit_buffer *audit_buf; 3413 3414 audit_buf = xfrm_audit_start("SA-notfound"); 3415 if (audit_buf == NULL) 3416 return; 3417 xfrm_audit_helper_pktinfo(skb, family, audit_buf); 3418 audit_log_end(audit_buf); 3419 } 3420 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple); 3421 3422 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, 3423 __be32 net_spi, __be32 net_seq) 3424 { 3425 struct audit_buffer *audit_buf; 3426 u32 spi; 3427 3428 audit_buf = xfrm_audit_start("SA-notfound"); 3429 if (audit_buf == NULL) 3430 return; 3431 xfrm_audit_helper_pktinfo(skb, family, audit_buf); 3432 spi = ntohl(net_spi); 3433 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u", 3434 spi, spi, ntohl(net_seq)); 3435 audit_log_end(audit_buf); 3436 } 3437 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound); 3438 3439 void xfrm_audit_state_icvfail(struct xfrm_state *x, 3440 struct sk_buff *skb, u8 proto) 3441 { 3442 struct audit_buffer *audit_buf; 3443 __be32 net_spi; 3444 __be32 net_seq; 3445 3446 audit_buf = xfrm_audit_start("SA-icv-failure"); 3447 if (audit_buf == NULL) 3448 return; 3449 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf); 3450 if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) { 3451 u32 spi = ntohl(net_spi); 3452 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u", 3453 spi, spi, ntohl(net_seq)); 3454 } 3455 audit_log_end(audit_buf); 3456 } 3457 EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail); 3458 #endif /* CONFIG_AUDITSYSCALL */ 3459