// SPDX-License-Identifier: GPL-2.0-only
/*
 * xfrm_state.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific functions
 *	Derek Atkins <derek@ihtfp.com>
 *		Add UDP Encapsulation
 *
 */

#include <linux/compat.h>
#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <linux/uaccess.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>

#include <crypto/aead.h>

#include "xfrm_hash.h"

#define xfrm_state_deref_prot(table, net) \
	rcu_dereference_protected((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
#define xfrm_state_deref_check(table, net) \
	rcu_dereference_check((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))

static void xfrm_state_gc_task(struct work_struct *work);

/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */

static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static struct kmem_cache *xfrm_state_cache __ro_after_init;

static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
static HLIST_HEAD(xfrm_state_gc_list);
static HLIST_HEAD(xfrm_state_dev_gc_list);

static inline bool xfrm_state_hold_rcu(struct xfrm_state __rcu *x)
{
	return refcount_inc_not_zero(&x->refcnt);
}

static inline unsigned int xfrm_dst_hash(struct net *net,
					 const xfrm_address_t *daddr,
					 const xfrm_address_t *saddr,
					 u32 reqid,
					 unsigned short family)
{
	lockdep_assert_held(&net->xfrm.xfrm_state_lock);

	return __xfrm_dst_hash(daddr, saddr, reqid, family, net->xfrm.state_hmask);
}

static inline unsigned int xfrm_src_hash(struct net *net,
					 const xfrm_address_t *daddr,
					 const xfrm_address_t *saddr,
					 unsigned short family)
{
	lockdep_assert_held(&net->xfrm.xfrm_state_lock);

	return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask);
}

static inline unsigned int
xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
	      __be32 spi, u8 proto, unsigned short family)
{
	lockdep_assert_held(&net->xfrm.xfrm_state_lock);

	return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
}

static unsigned int xfrm_seq_hash(struct net *net, u32 seq)
{
	lockdep_assert_held(&net->xfrm.xfrm_state_lock);

	return __xfrm_seq_hash(seq, net->xfrm.state_hmask);
}

#define XFRM_STATE_INSERT(by, _n, _h, _type)				\
	{								\
		struct xfrm_state *_x = NULL;				\
									\
		if (_type != XFRM_DEV_OFFLOAD_PACKET) {			\
			hlist_for_each_entry_rcu(_x, _h, by) {		\
				if (_x->xso.type == XFRM_DEV_OFFLOAD_PACKET) \
					continue;			\
				break;					\
			}						\
		}							\
									\
		if (!_x || _x->xso.type == XFRM_DEV_OFFLOAD_PACKET)	\
			/* SAD is empty or consists of HW SAs only */	\
			hlist_add_head_rcu(_n, _h);			\
		else							\
			hlist_add_before_rcu(_n, &_x->by);		\
	}
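/*
 * Illustrative sketch (added commentary, not from the original file):
 * XFRM_STATE_INSERT() keeps XFRM_DEV_OFFLOAD_PACKET (HW) states at the
 * head of every chain. Given a chain
 *
 *	head -> [HW] -> [HW] -> [SW] -> [SW]
 *
 * a HW state is linked with hlist_add_head_rcu() at the head, while a SW
 * state is linked with hlist_add_before_rcu() in front of the first SW
 * entry. The lookups below rely on this HW-first ordering to stop
 * iterating early.
 */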
static void xfrm_hash_transfer(struct hlist_head *list,
			       struct hlist_head *ndsttable,
			       struct hlist_head *nsrctable,
			       struct hlist_head *nspitable,
			       struct hlist_head *nseqtable,
			       unsigned int nhashmask)
{
	struct hlist_node *tmp;
	struct xfrm_state *x;

	hlist_for_each_entry_safe(x, tmp, list, bydst) {
		unsigned int h;

		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
				    x->props.reqid, x->props.family,
				    nhashmask);
		XFRM_STATE_INSERT(bydst, &x->bydst, ndsttable + h, x->xso.type);

		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
				    x->props.family,
				    nhashmask);
		XFRM_STATE_INSERT(bysrc, &x->bysrc, nsrctable + h, x->xso.type);

		if (x->id.spi) {
			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
					    x->id.proto, x->props.family,
					    nhashmask);
			XFRM_STATE_INSERT(byspi, &x->byspi, nspitable + h,
					  x->xso.type);
		}

		if (x->km.seq) {
			h = __xfrm_seq_hash(x->km.seq, nhashmask);
			XFRM_STATE_INSERT(byseq, &x->byseq, nseqtable + h,
					  x->xso.type);
		}
	}
}

static unsigned long xfrm_hash_new_size(unsigned int state_hmask)
{
	return ((state_hmask + 1) << 1) * sizeof(struct hlist_head);
}

static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.state_hash_work);
	struct hlist_head *ndst, *nsrc, *nspi, *nseq, *odst, *osrc, *ospi, *oseq;
	unsigned long nsize, osize;
	unsigned int nhashmask, ohashmask;
	int i;

	nsize = xfrm_hash_new_size(net->xfrm.state_hmask);
	ndst = xfrm_hash_alloc(nsize);
	if (!ndst)
		return;
	nsrc = xfrm_hash_alloc(nsize);
	if (!nsrc) {
		xfrm_hash_free(ndst, nsize);
		return;
	}
	nspi = xfrm_hash_alloc(nsize);
	if (!nspi) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		return;
	}
	nseq = xfrm_hash_alloc(nsize);
	if (!nseq) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		xfrm_hash_free(nspi, nsize);
		return;
	}

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	write_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);

	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
	odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
	for (i = net->xfrm.state_hmask; i >= 0; i--)
		xfrm_hash_transfer(odst + i, ndst, nsrc, nspi, nseq, nhashmask);

	osrc = xfrm_state_deref_prot(net->xfrm.state_bysrc, net);
	ospi = xfrm_state_deref_prot(net->xfrm.state_byspi, net);
	oseq = xfrm_state_deref_prot(net->xfrm.state_byseq, net);
	ohashmask = net->xfrm.state_hmask;

	rcu_assign_pointer(net->xfrm.state_bydst, ndst);
	rcu_assign_pointer(net->xfrm.state_bysrc, nsrc);
	rcu_assign_pointer(net->xfrm.state_byspi, nspi);
	rcu_assign_pointer(net->xfrm.state_byseq, nseq);
	net->xfrm.state_hmask = nhashmask;

	write_seqcount_end(&net->xfrm.xfrm_state_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	osize = (ohashmask + 1) * sizeof(struct hlist_head);

	synchronize_rcu();

	xfrm_hash_free(odst, osize);
	xfrm_hash_free(osrc, osize);
	xfrm_hash_free(ospi, osize);
	xfrm_hash_free(oseq, osize);
}
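/*
 * Worked example (added commentary): with state_hmask == 7 (8 buckets),
 * xfrm_hash_new_size() returns 16 * sizeof(struct hlist_head) and
 * xfrm_hash_resize() derives nhashmask == 15, i.e. each resize doubles
 * the bucket count. Readers keep using the old tables until the
 * rcu_assign_pointer() switch above, and the old memory is only freed
 * after synchronize_rcu().
 */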
static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO];

static DEFINE_SPINLOCK(xfrm_state_gc_lock);
static DEFINE_SPINLOCK(xfrm_state_dev_gc_lock);

int __xfrm_state_delete(struct xfrm_state *x);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
static bool km_is_alive(const struct km_event *c);
void km_state_expired(struct xfrm_state *x, int hard, u32 portid);

int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	int err = 0;

	if (!afinfo)
		return -EAFNOSUPPORT;

#define X(afi, T, name) do {			\
		WARN_ON((afi)->type_ ## name);	\
		(afi)->type_ ## name = (T);	\
	} while (0)

	switch (type->proto) {
	case IPPROTO_COMP:
		X(afinfo, type, comp);
		break;
	case IPPROTO_AH:
		X(afinfo, type, ah);
		break;
	case IPPROTO_ESP:
		X(afinfo, type, esp);
		break;
	case IPPROTO_IPIP:
		X(afinfo, type, ipip);
		break;
	case IPPROTO_DSTOPTS:
		X(afinfo, type, dstopts);
		break;
	case IPPROTO_ROUTING:
		X(afinfo, type, routing);
		break;
	case IPPROTO_IPV6:
		X(afinfo, type, ipip6);
		break;
	default:
		WARN_ON(1);
		err = -EPROTONOSUPPORT;
		break;
	}
#undef X
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(xfrm_register_type);

void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return;

#define X(afi, T, name) do {				\
		WARN_ON((afi)->type_ ## name != (T));	\
		(afi)->type_ ## name = NULL;		\
	} while (0)

	switch (type->proto) {
	case IPPROTO_COMP:
		X(afinfo, type, comp);
		break;
	case IPPROTO_AH:
		X(afinfo, type, ah);
		break;
	case IPPROTO_ESP:
		X(afinfo, type, esp);
		break;
	case IPPROTO_IPIP:
		X(afinfo, type, ipip);
		break;
	case IPPROTO_DSTOPTS:
		X(afinfo, type, dstopts);
		break;
	case IPPROTO_ROUTING:
		X(afinfo, type, routing);
		break;
	case IPPROTO_IPV6:
		X(afinfo, type, ipip6);
		break;
	default:
		WARN_ON(1);
		break;
	}
#undef X
	rcu_read_unlock();
}
EXPORT_SYMBOL(xfrm_unregister_type);

static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	const struct xfrm_type *type = NULL;
	struct xfrm_state_afinfo *afinfo;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;

	switch (proto) {
	case IPPROTO_COMP:
		type = afinfo->type_comp;
		break;
	case IPPROTO_AH:
		type = afinfo->type_ah;
		break;
	case IPPROTO_ESP:
		type = afinfo->type_esp;
		break;
	case IPPROTO_IPIP:
		type = afinfo->type_ipip;
		break;
	case IPPROTO_DSTOPTS:
		type = afinfo->type_dstopts;
		break;
	case IPPROTO_ROUTING:
		type = afinfo->type_routing;
		break;
	case IPPROTO_IPV6:
		type = afinfo->type_ipip6;
		break;
	default:
		break;
	}

	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;

	rcu_read_unlock();

	if (!type && !modload_attempted) {
		request_module("xfrm-type-%d-%d", family, proto);
		modload_attempted = 1;
		goto retry;
	}

	return type;
}

static void xfrm_put_type(const struct xfrm_type *type)
{
	module_put(type->owner);
}
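/*
 * Example (added commentary): a miss for ESP on IPv4 triggers
 * request_module("xfrm-type-2-50") above (AF_INET == 2, IPPROTO_ESP == 50),
 * so a modular xfrm_type with a matching MODULE_ALIAS_XFRM_TYPE() alias
 * can be autoloaded before the lookup is retried exactly once.
 */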
int xfrm_register_type_offload(const struct xfrm_type_offload *type,
			       unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	switch (type->proto) {
	case IPPROTO_ESP:
		WARN_ON(afinfo->type_offload_esp);
		afinfo->type_offload_esp = type;
		break;
	default:
		WARN_ON(1);
		err = -EPROTONOSUPPORT;
		break;
	}

	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(xfrm_register_type_offload);

void xfrm_unregister_type_offload(const struct xfrm_type_offload *type,
				  unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return;

	switch (type->proto) {
	case IPPROTO_ESP:
		WARN_ON(afinfo->type_offload_esp != type);
		afinfo->type_offload_esp = NULL;
		break;
	default:
		WARN_ON(1);
		break;
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xfrm_unregister_type_offload);

static const struct xfrm_type_offload *
xfrm_get_type_offload(u8 proto, unsigned short family, bool try_load)
{
	const struct xfrm_type_offload *type = NULL;
	struct xfrm_state_afinfo *afinfo;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;

	switch (proto) {
	case IPPROTO_ESP:
		type = afinfo->type_offload_esp;
		break;
	default:
		break;
	}

	if ((type && !try_module_get(type->owner)))
		type = NULL;

	rcu_read_unlock();

	if (!type && try_load) {
		request_module("xfrm-offload-%d-%d", family, proto);
		try_load = false;
		goto retry;
	}

	return type;
}

static void xfrm_put_type_offload(const struct xfrm_type_offload *type)
{
	module_put(type->owner);
}

static const struct xfrm_mode xfrm4_mode_map[XFRM_MODE_MAX] = {
	[XFRM_MODE_BEET] = {
		.encap = XFRM_MODE_BEET,
		.flags = XFRM_MODE_FLAG_TUNNEL,
		.family = AF_INET,
	},
	[XFRM_MODE_TRANSPORT] = {
		.encap = XFRM_MODE_TRANSPORT,
		.family = AF_INET,
	},
	[XFRM_MODE_TUNNEL] = {
		.encap = XFRM_MODE_TUNNEL,
		.flags = XFRM_MODE_FLAG_TUNNEL,
		.family = AF_INET,
	},
	[XFRM_MODE_IPTFS] = {
		.encap = XFRM_MODE_IPTFS,
		.flags = XFRM_MODE_FLAG_TUNNEL,
		.family = AF_INET,
	},
};

static const struct xfrm_mode xfrm6_mode_map[XFRM_MODE_MAX] = {
	[XFRM_MODE_BEET] = {
		.encap = XFRM_MODE_BEET,
		.flags = XFRM_MODE_FLAG_TUNNEL,
		.family = AF_INET6,
	},
	[XFRM_MODE_ROUTEOPTIMIZATION] = {
		.encap = XFRM_MODE_ROUTEOPTIMIZATION,
		.family = AF_INET6,
	},
	[XFRM_MODE_TRANSPORT] = {
		.encap = XFRM_MODE_TRANSPORT,
		.family = AF_INET6,
	},
	[XFRM_MODE_TUNNEL] = {
		.encap = XFRM_MODE_TUNNEL,
		.flags = XFRM_MODE_FLAG_TUNNEL,
		.family = AF_INET6,
	},
	[XFRM_MODE_IPTFS] = {
		.encap = XFRM_MODE_IPTFS,
		.flags = XFRM_MODE_FLAG_TUNNEL,
		.family = AF_INET6,
	},
};

static const struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
	const struct xfrm_mode *mode;

	if (unlikely(encap >= XFRM_MODE_MAX))
		return NULL;

	switch (family) {
	case AF_INET:
		mode = &xfrm4_mode_map[encap];
		if (mode->family == family)
			return mode;
		break;
	case AF_INET6:
		mode = &xfrm6_mode_map[encap];
		if (mode->family == family)
			return mode;
		break;
	default:
		break;
	}

	return NULL;
}
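/*
 * Example (added commentary): xfrm_get_mode(XFRM_MODE_TUNNEL, AF_INET)
 * returns &xfrm4_mode_map[XFRM_MODE_TUNNEL]. A mode that was never
 * populated for a family (e.g. XFRM_MODE_ROUTEOPTIMIZATION on AF_INET)
 * leaves its zero-initialized entry with mode->family == 0, so the
 * family check fails and the lookup falls through to NULL.
 */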
static const struct xfrm_mode_cbs __rcu *xfrm_mode_cbs_map[XFRM_MODE_MAX];
static DEFINE_SPINLOCK(xfrm_mode_cbs_map_lock);

int xfrm_register_mode_cbs(u8 mode, const struct xfrm_mode_cbs *mode_cbs)
{
	if (mode >= XFRM_MODE_MAX)
		return -EINVAL;

	spin_lock_bh(&xfrm_mode_cbs_map_lock);
	rcu_assign_pointer(xfrm_mode_cbs_map[mode], mode_cbs);
	spin_unlock_bh(&xfrm_mode_cbs_map_lock);

	return 0;
}
EXPORT_SYMBOL(xfrm_register_mode_cbs);

void xfrm_unregister_mode_cbs(u8 mode)
{
	if (mode >= XFRM_MODE_MAX)
		return;

	spin_lock_bh(&xfrm_mode_cbs_map_lock);
	RCU_INIT_POINTER(xfrm_mode_cbs_map[mode], NULL);
	spin_unlock_bh(&xfrm_mode_cbs_map_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL(xfrm_unregister_mode_cbs);

static const struct xfrm_mode_cbs *xfrm_get_mode_cbs(u8 mode)
{
	const struct xfrm_mode_cbs *cbs;
	bool try_load = true;

	if (mode >= XFRM_MODE_MAX)
		return NULL;

retry:
	rcu_read_lock();

	cbs = rcu_dereference(xfrm_mode_cbs_map[mode]);
	if (cbs && !try_module_get(cbs->owner))
		cbs = NULL;

	rcu_read_unlock();

	if (mode == XFRM_MODE_IPTFS && !cbs && try_load) {
		request_module("xfrm-iptfs");
		try_load = false;
		goto retry;
	}

	return cbs;
}

void xfrm_state_free(struct xfrm_state *x)
{
	kmem_cache_free(xfrm_state_cache, x);
}
EXPORT_SYMBOL(xfrm_state_free);

static void ___xfrm_state_destroy(struct xfrm_state *x)
{
	if (x->mode_cbs && x->mode_cbs->destroy_state)
		x->mode_cbs->destroy_state(x);
	hrtimer_cancel(&x->mtimer);
	del_timer_sync(&x->rtimer);
	kfree(x->aead);
	kfree(x->aalg);
	kfree(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	kfree(x->coaddr);
	kfree(x->replay_esn);
	kfree(x->preplay_esn);
	if (x->type_offload)
		xfrm_put_type_offload(x->type_offload);
	if (x->type) {
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	if (x->xfrag.page)
		put_page(x->xfrag.page);
	xfrm_dev_state_free(x);
	security_xfrm_state_free(x);
	xfrm_state_free(x);
}

static void xfrm_state_gc_task(struct work_struct *work)
{
	struct xfrm_state *x;
	struct hlist_node *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_move_list(&xfrm_state_gc_list, &gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	synchronize_rcu();

	hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
		___xfrm_state_destroy(x);
}
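/*
 * Note (added commentary): the GC task snapshots the pending list under
 * xfrm_state_gc_lock and then calls synchronize_rcu() once for the whole
 * batch, so any RCU reader that found a state through the hash tables
 * before it was unlinked is guaranteed to have finished before
 * ___xfrm_state_destroy() frees the memory.
 */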
static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
{
	struct xfrm_state *x = container_of(me, struct xfrm_state, mtimer);
	enum hrtimer_restart ret = HRTIMER_NORESTART;
	time64_t now = ktime_get_real_seconds();
	time64_t next = TIME64_MAX;
	int warn = 0;
	int err = 0;

	spin_lock(&x->lock);
	xfrm_dev_state_update_stats(x);

	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		time64_t tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0) {
			if (x->xflags & XFRM_SOFT_EXPIRE) {
				/* enter hard expire without soft expire first?!
				 * setting a new date could trigger this.
				 * workaround: fix x->curlft.add_time by below:
				 */
				x->curlft.add_time = now - x->saved_tmo - 1;
				tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
			} else
				goto expired;
		}
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		time64_t tmo = x->lft.hard_use_expires_seconds +
			(READ_ONCE(x->curlft.use_time) ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->km.dying)
		goto resched;
	if (x->lft.soft_add_expires_seconds) {
		time64_t tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			x->xflags &= ~XFRM_SOFT_EXPIRE;
		} else if (tmo < next) {
			next = tmo;
			x->xflags |= XFRM_SOFT_EXPIRE;
			x->saved_tmo = tmo;
		}
	}
	if (x->lft.soft_use_expires_seconds) {
		time64_t tmo = x->lft.soft_use_expires_seconds +
			(READ_ONCE(x->curlft.use_time) ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0, 0);
resched:
	if (next != TIME64_MAX) {
		hrtimer_forward_now(&x->mtimer, ktime_set(next, 0));
		ret = HRTIMER_RESTART;
	}

	goto out;

expired:
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0)
		x->km.state = XFRM_STATE_EXPIRED;

	err = __xfrm_state_delete(x);
	if (!err)
		km_state_expired(x, 1, 0);

	xfrm_audit_state_delete(x, err ? 0 : 1, true);

out:
	spin_unlock(&x->lock);
	return ret;
}

static void xfrm_replay_timer_handler(struct timer_list *t);

struct xfrm_state *xfrm_state_alloc(struct net *net)
{
	struct xfrm_state *x;

	x = kmem_cache_zalloc(xfrm_state_cache, GFP_ATOMIC);

	if (x) {
		write_pnet(&x->xs_net, net);
		refcount_set(&x->refcnt, 1);
		atomic_set(&x->tunnel_users, 0);
		INIT_LIST_HEAD(&x->km.all);
		INIT_HLIST_NODE(&x->state_cache);
		INIT_HLIST_NODE(&x->bydst);
		INIT_HLIST_NODE(&x->bysrc);
		INIT_HLIST_NODE(&x->byspi);
		INIT_HLIST_NODE(&x->byseq);
		hrtimer_init(&x->mtimer, CLOCK_BOOTTIME, HRTIMER_MODE_ABS_SOFT);
		x->mtimer.function = xfrm_timer_handler;
		timer_setup(&x->rtimer, xfrm_replay_timer_handler, 0);
		x->curlft.add_time = ktime_get_real_seconds();
		x->lft.soft_byte_limit = XFRM_INF;
		x->lft.soft_packet_limit = XFRM_INF;
		x->lft.hard_byte_limit = XFRM_INF;
		x->lft.hard_packet_limit = XFRM_INF;
		x->replay_maxage = 0;
		x->replay_maxdiff = 0;
		x->pcpu_num = UINT_MAX;
		spin_lock_init(&x->lock);
		x->mode_data = NULL;
	}
	return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);
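/*
 * Usage sketch (illustrative, not from this file): key managers pair
 * xfrm_state_alloc() with xfrm_state_add(), dropping the initial
 * reference themselves if the add fails:
 *
 *	struct xfrm_state *x = xfrm_state_alloc(net);
 *
 *	if (!x)
 *		return -ENOMEM;
 *	... fill x->id, x->props, x->lft, algorithms ...
 *	if (xfrm_state_add(x)) {
 *		x->km.state = XFRM_STATE_DEAD;
 *		xfrm_state_put(x);
 *	}
 */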
#ifdef CONFIG_XFRM_OFFLOAD
void xfrm_dev_state_delete(struct xfrm_state *x)
{
	struct xfrm_dev_offload *xso = &x->xso;
	struct net_device *dev = READ_ONCE(xso->dev);

	if (dev) {
		dev->xfrmdev_ops->xdo_dev_state_delete(x);
		spin_lock_bh(&xfrm_state_dev_gc_lock);
		hlist_add_head(&x->dev_gclist, &xfrm_state_dev_gc_list);
		spin_unlock_bh(&xfrm_state_dev_gc_lock);
	}
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_delete);

void xfrm_dev_state_free(struct xfrm_state *x)
{
	struct xfrm_dev_offload *xso = &x->xso;
	struct net_device *dev = READ_ONCE(xso->dev);

	if (dev && dev->xfrmdev_ops) {
		spin_lock_bh(&xfrm_state_dev_gc_lock);
		if (!hlist_unhashed(&x->dev_gclist))
			hlist_del(&x->dev_gclist);
		spin_unlock_bh(&xfrm_state_dev_gc_lock);

		if (dev->xfrmdev_ops->xdo_dev_state_free)
			dev->xfrmdev_ops->xdo_dev_state_free(x);
		WRITE_ONCE(xso->dev, NULL);
		xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
		netdev_put(dev, &xso->dev_tracker);
	}
}
#endif

void __xfrm_state_destroy(struct xfrm_state *x, bool sync)
{
	WARN_ON(x->km.state != XFRM_STATE_DEAD);

	if (sync) {
		synchronize_rcu();
		___xfrm_state_destroy(x);
	} else {
		spin_lock_bh(&xfrm_state_gc_lock);
		hlist_add_head(&x->gclist, &xfrm_state_gc_list);
		spin_unlock_bh(&xfrm_state_gc_lock);
		schedule_work(&xfrm_state_gc_work);
	}
}
EXPORT_SYMBOL(__xfrm_state_destroy);

int __xfrm_state_delete(struct xfrm_state *x)
{
	struct net *net = xs_net(x);
	int err = -ESRCH;

	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;

		spin_lock(&net->xfrm.xfrm_state_lock);
		list_del(&x->km.all);
		hlist_del_rcu(&x->bydst);
		hlist_del_rcu(&x->bysrc);
		if (x->km.seq)
			hlist_del_rcu(&x->byseq);
		if (!hlist_unhashed(&x->state_cache))
			hlist_del_rcu(&x->state_cache);
		if (!hlist_unhashed(&x->state_cache_input))
			hlist_del_rcu(&x->state_cache_input);

		if (x->id.spi)
			hlist_del_rcu(&x->byspi);
		net->xfrm.state_num--;
		xfrm_nat_keepalive_state_updated(x);
		spin_unlock(&net->xfrm.xfrm_state_lock);

		if (x->encap_sk)
			sock_put(rcu_dereference_raw(x->encap_sk));

		xfrm_dev_state_delete(x);

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		xfrm_state_put(x);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);

int xfrm_state_delete(struct xfrm_state *x)
{
	int err;

	spin_lock_bh(&x->lock);
	err = __xfrm_state_delete(x);
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_state_delete);
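/*
 * Note (added commentary): __xfrm_state_delete() expects x->lock to be
 * held and returns -ESRCH if the state is already dead, so callers that
 * do not already hold the lock use the xfrm_state_delete() wrapper above.
 * Deleting twice is harmless, but only the first call reports success.
 */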
#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
{
	int i, err = 0;

	for (i = 0; i <= net->xfrm.state_hmask; i++) {
		struct xfrm_state *x;

		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
			if (xfrm_id_proto_match(x->id.proto, proto) &&
			    (err = security_xfrm_state_delete(x)) != 0) {
				xfrm_audit_state_delete(x, 0, task_valid);
				return err;
			}
		}
	}

	return err;
}

static inline int
xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
{
	int i, err = 0;

	for (i = 0; i <= net->xfrm.state_hmask; i++) {
		struct xfrm_state *x;
		struct xfrm_dev_offload *xso;

		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
			xso = &x->xso;

			if (xso->dev == dev &&
			    (err = security_xfrm_state_delete(x)) != 0) {
				xfrm_audit_state_delete(x, 0, task_valid);
				return err;
			}
		}
	}

	return err;
}
#else
static inline int
xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
{
	return 0;
}

static inline int
xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
{
	return 0;
}
#endif

int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync)
{
	int i, err = 0, cnt = 0;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	err = xfrm_state_flush_secctx_check(net, proto, task_valid);
	if (err)
		goto out;

	err = -ESRCH;
	for (i = 0; i <= net->xfrm.state_hmask; i++) {
		struct xfrm_state *x;
restart:
		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    xfrm_id_proto_match(x->id.proto, proto)) {
				xfrm_state_hold(x);
				spin_unlock_bh(&net->xfrm.xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_audit_state_delete(x, err ? 0 : 1,
							task_valid);
				if (sync)
					xfrm_state_put_sync(x);
				else
					xfrm_state_put(x);
				if (!err)
					cnt++;

				spin_lock_bh(&net->xfrm.xfrm_state_lock);
				goto restart;
			}
		}
	}
out:
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	if (cnt)
		err = 0;

	return err;
}
EXPORT_SYMBOL(xfrm_state_flush);

int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid)
{
	struct xfrm_state *x;
	struct hlist_node *tmp;
	struct xfrm_dev_offload *xso;
	int i, err = 0, cnt = 0;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	err = xfrm_dev_state_flush_secctx_check(net, dev, task_valid);
	if (err)
		goto out;

	err = -ESRCH;
	for (i = 0; i <= net->xfrm.state_hmask; i++) {
restart:
		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
			xso = &x->xso;

			if (!xfrm_state_kern(x) && xso->dev == dev) {
				xfrm_state_hold(x);
				spin_unlock_bh(&net->xfrm.xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_dev_state_free(x);

				xfrm_audit_state_delete(x, err ? 0 : 1,
							task_valid);
				xfrm_state_put(x);
				if (!err)
					cnt++;

				spin_lock_bh(&net->xfrm.xfrm_state_lock);
				goto restart;
			}
		}
	}
	if (cnt)
		err = 0;

out:
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	spin_lock_bh(&xfrm_state_dev_gc_lock);
restart_gc:
	hlist_for_each_entry_safe(x, tmp, &xfrm_state_dev_gc_list, dev_gclist) {
		xso = &x->xso;

		if (xso->dev == dev) {
			spin_unlock_bh(&xfrm_state_dev_gc_lock);
			xfrm_dev_state_free(x);
			spin_lock_bh(&xfrm_state_dev_gc_lock);
			goto restart_gc;
		}
	}
	spin_unlock_bh(&xfrm_state_dev_gc_lock);

	xfrm_flush_gc();

	return err;
}
EXPORT_SYMBOL(xfrm_dev_state_flush);

void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
{
	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	si->sadcnt = net->xfrm.state_num;
	si->sadhcnt = net->xfrm.state_hmask + 1;
	si->sadhmcnt = xfrm_state_hashmax;
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_sad_getinfo);
static void
__xfrm4_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	sel->daddr.a4 = fl4->daddr;
	sel->saddr.a4 = fl4->saddr;
	sel->dport = xfrm_flowi_dport(fl, &fl4->uli);
	sel->dport_mask = htons(0xffff);
	sel->sport = xfrm_flowi_sport(fl, &fl4->uli);
	sel->sport_mask = htons(0xffff);
	sel->family = AF_INET;
	sel->prefixlen_d = 32;
	sel->prefixlen_s = 32;
	sel->proto = fl4->flowi4_proto;
	sel->ifindex = fl4->flowi4_oif;
}

static void
__xfrm6_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	/* Initialize temporary selector matching only to current session. */
	*(struct in6_addr *)&sel->daddr = fl6->daddr;
	*(struct in6_addr *)&sel->saddr = fl6->saddr;
	sel->dport = xfrm_flowi_dport(fl, &fl6->uli);
	sel->dport_mask = htons(0xffff);
	sel->sport = xfrm_flowi_sport(fl, &fl6->uli);
	sel->sport_mask = htons(0xffff);
	sel->family = AF_INET6;
	sel->prefixlen_d = 128;
	sel->prefixlen_s = 128;
	sel->proto = fl6->flowi6_proto;
	sel->ifindex = fl6->flowi6_oif;
}

static void
xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
		    const struct xfrm_tmpl *tmpl,
		    const xfrm_address_t *daddr, const xfrm_address_t *saddr,
		    unsigned short family)
{
	switch (family) {
	case AF_INET:
		__xfrm4_init_tempsel(&x->sel, fl);
		break;
	case AF_INET6:
		__xfrm6_init_tempsel(&x->sel, fl);
		break;
	}

	x->id = tmpl->id;

	switch (tmpl->encap_family) {
	case AF_INET:
		if (x->id.daddr.a4 == 0)
			x->id.daddr.a4 = daddr->a4;
		x->props.saddr = tmpl->saddr;
		if (x->props.saddr.a4 == 0)
			x->props.saddr.a4 = saddr->a4;
		break;
	case AF_INET6:
		if (ipv6_addr_any((struct in6_addr *)&x->id.daddr))
			memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr));
		memcpy(&x->props.saddr, &tmpl->saddr, sizeof(x->props.saddr));
		if (ipv6_addr_any((struct in6_addr *)&x->props.saddr))
			memcpy(&x->props.saddr, saddr, sizeof(x->props.saddr));
		break;
	}

	x->props.mode = tmpl->mode;
	x->props.reqid = tmpl->reqid;
	x->props.family = tmpl->encap_family;
}

struct xfrm_hash_state_ptrs {
	const struct hlist_head *bydst;
	const struct hlist_head *bysrc;
	const struct hlist_head *byspi;
	unsigned int hmask;
};

static void xfrm_hash_ptrs_get(const struct net *net, struct xfrm_hash_state_ptrs *ptrs)
{
	unsigned int sequence;

	do {
		sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);

		ptrs->bydst = xfrm_state_deref_check(net->xfrm.state_bydst, net);
		ptrs->bysrc = xfrm_state_deref_check(net->xfrm.state_bysrc, net);
		ptrs->byspi = xfrm_state_deref_check(net->xfrm.state_byspi, net);
		ptrs->hmask = net->xfrm.state_hmask;
	} while (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence));
}
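/*
 * Note (added commentary): xfrm_hash_ptrs_get() retries until the table
 * pointers and hmask are read within a single resize generation, so a
 * lookup never hashes with a new mask into an old table (or vice versa)
 * while xfrm_hash_resize() is switching the tables over.
 */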
static struct xfrm_state *__xfrm_state_lookup_all(const struct xfrm_hash_state_ptrs *state_ptrs,
						  u32 mark,
						  const xfrm_address_t *daddr,
						  __be32 spi, u8 proto,
						  unsigned short family,
						  struct xfrm_dev_offload *xdo)
{
	unsigned int h = __xfrm_spi_hash(daddr, spi, proto, family, state_ptrs->hmask);
	struct xfrm_state *x;

	hlist_for_each_entry_rcu(x, state_ptrs->byspi + h, byspi) {
#ifdef CONFIG_XFRM_OFFLOAD
		if (xdo->type == XFRM_DEV_OFFLOAD_PACKET) {
			if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
				/* HW states are in the head of list, there is
				 * no need to iterate further.
				 */
				break;

			/* Packet offload: both policy and SA should
			 * have same device.
			 */
			if (xdo->dev != x->xso.dev)
				continue;
		} else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
			/* Skip HW policy for SW lookups */
			continue;
#endif
		if (x->props.family != family ||
		    x->id.spi != spi ||
		    x->id.proto != proto ||
		    !xfrm_addr_equal(&x->id.daddr, daddr, family))
			continue;

		if ((mark & x->mark.m) != x->mark.v)
			continue;
		if (!xfrm_state_hold_rcu(x))
			continue;
		return x;
	}

	return NULL;
}

static struct xfrm_state *__xfrm_state_lookup(const struct xfrm_hash_state_ptrs *state_ptrs,
					      u32 mark,
					      const xfrm_address_t *daddr,
					      __be32 spi, u8 proto,
					      unsigned short family)
{
	unsigned int h = __xfrm_spi_hash(daddr, spi, proto, family, state_ptrs->hmask);
	struct xfrm_state *x;

	hlist_for_each_entry_rcu(x, state_ptrs->byspi + h, byspi) {
		if (x->props.family != family ||
		    x->id.spi != spi ||
		    x->id.proto != proto ||
		    !xfrm_addr_equal(&x->id.daddr, daddr, family))
			continue;

		if ((mark & x->mark.m) != x->mark.v)
			continue;
		if (!xfrm_state_hold_rcu(x))
			continue;
		return x;
	}

	return NULL;
}

struct xfrm_state *xfrm_input_state_lookup(struct net *net, u32 mark,
					   const xfrm_address_t *daddr,
					   __be32 spi, u8 proto,
					   unsigned short family)
{
	struct xfrm_hash_state_ptrs state_ptrs;
	struct hlist_head *state_cache_input;
	struct xfrm_state *x = NULL;

	state_cache_input = raw_cpu_ptr(net->xfrm.state_cache_input);

	rcu_read_lock();
	hlist_for_each_entry_rcu(x, state_cache_input, state_cache_input) {
		if (x->props.family != family ||
		    x->id.spi != spi ||
		    x->id.proto != proto ||
		    !xfrm_addr_equal(&x->id.daddr, daddr, family))
			continue;

		if ((mark & x->mark.m) != x->mark.v)
			continue;
		if (!xfrm_state_hold_rcu(x))
			continue;
		goto out;
	}

	xfrm_hash_ptrs_get(net, &state_ptrs);

	x = __xfrm_state_lookup(&state_ptrs, mark, daddr, spi, proto, family);

	if (x && x->km.state == XFRM_STATE_VALID) {
		spin_lock_bh(&net->xfrm.xfrm_state_lock);
		if (hlist_unhashed(&x->state_cache_input)) {
			hlist_add_head_rcu(&x->state_cache_input, state_cache_input);
		} else {
			hlist_del_rcu(&x->state_cache_input);
			hlist_add_head_rcu(&x->state_cache_input, state_cache_input);
		}
		spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	}

out:
	rcu_read_unlock();
	return x;
}
EXPORT_SYMBOL(xfrm_input_state_lookup);
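/*
 * Note (added commentary): xfrm_input_state_lookup() first probes a
 * per-CPU input cache and only falls back to the global byspi hash on a
 * miss; a valid hash hit is then (re)inserted at the head of this CPU's
 * cache list, so repeated receive traffic for the same SA stays on the
 * fast path.
 */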
static struct xfrm_state *__xfrm_state_lookup_byaddr(const struct xfrm_hash_state_ptrs *state_ptrs,
						     u32 mark,
						     const xfrm_address_t *daddr,
						     const xfrm_address_t *saddr,
						     u8 proto, unsigned short family)
{
	unsigned int h = __xfrm_src_hash(daddr, saddr, family, state_ptrs->hmask);
	struct xfrm_state *x;

	hlist_for_each_entry_rcu(x, state_ptrs->bysrc + h, bysrc) {
		if (x->props.family != family ||
		    x->id.proto != proto ||
		    !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
		    !xfrm_addr_equal(&x->props.saddr, saddr, family))
			continue;

		if ((mark & x->mark.m) != x->mark.v)
			continue;
		if (!xfrm_state_hold_rcu(x))
			continue;
		return x;
	}

	return NULL;
}

static inline struct xfrm_state *
__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
{
	struct xfrm_hash_state_ptrs state_ptrs;
	struct net *net = xs_net(x);
	u32 mark = x->mark.v & x->mark.m;

	xfrm_hash_ptrs_get(net, &state_ptrs);

	if (use_spi)
		return __xfrm_state_lookup(&state_ptrs, mark, &x->id.daddr,
					   x->id.spi, x->id.proto, family);
	else
		return __xfrm_state_lookup_byaddr(&state_ptrs, mark,
						  &x->id.daddr,
						  &x->props.saddr,
						  x->id.proto, family);
}

static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
{
	if (have_hash_collision &&
	    (net->xfrm.state_hmask + 1) < xfrm_state_hashmax &&
	    net->xfrm.state_num > net->xfrm.state_hmask)
		schedule_work(&net->xfrm.state_hash_work);
}

static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
			       const struct flowi *fl, unsigned short family,
			       struct xfrm_state **best, int *acq_in_progress,
			       int *error)
{
	/* We need the cpu id just as a lookup key,
	 * we don't require it to be stable.
	 */
	unsigned int pcpu_id = get_cpu();
	put_cpu();

	/* Resolution logic:
	 * 1. There is a valid state with matching selector. Done.
	 * 2. Valid state with inappropriate selector. Skip.
	 *
	 * Entering area of "sysdeps".
	 *
	 * 3. If the state is not valid, its selector is temporary and
	 *    matches only the session which triggered the previous
	 *    resolution. The key manager will do something to install a
	 *    state with a proper selector.
	 */
	if (x->km.state == XFRM_STATE_VALID) {
		if ((x->sel.family &&
		     (x->sel.family != family ||
		      !xfrm_selector_match(&x->sel, fl, family))) ||
		    !security_xfrm_state_pol_flow_match(x, pol,
							&fl->u.__fl_common))
			return;

		if (x->pcpu_num != UINT_MAX && x->pcpu_num != pcpu_id)
			return;

		if (!*best ||
		    ((*best)->pcpu_num == UINT_MAX && x->pcpu_num == pcpu_id) ||
		    (*best)->km.dying > x->km.dying ||
		    ((*best)->km.dying == x->km.dying &&
		     (*best)->curlft.add_time < x->curlft.add_time))
			*best = x;
	} else if (x->km.state == XFRM_STATE_ACQ) {
		if (!*best || x->pcpu_num == pcpu_id)
			*acq_in_progress = 1;
	} else if (x->km.state == XFRM_STATE_ERROR ||
		   x->km.state == XFRM_STATE_EXPIRED) {
		if ((!x->sel.family ||
		     (x->sel.family == family &&
		      xfrm_selector_match(&x->sel, fl, family))) &&
		    security_xfrm_state_pol_flow_match(x, pol,
						       &fl->u.__fl_common))
			*error = -ESRCH;
	}
}
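/*
 * Note (added commentary): among valid candidates, the ranking above
 * prefers a CPU-local state over an any-CPU (UINT_MAX) one, then a state
 * that is not dying, and finally the one with the larger add_time, so a
 * freshly installed replacement SA wins over the SA it is rekeying.
 */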
struct xfrm_state *
xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
		const struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family, u32 if_id)
{
	static xfrm_address_t saddr_wildcard = { };
	struct xfrm_hash_state_ptrs state_ptrs;
	struct net *net = xp_net(pol);
	unsigned int h, h_wildcard;
	struct xfrm_state *x, *x0, *to_put;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;
	u32 mark = pol->mark.v & pol->mark.m;
	unsigned short encap_family = tmpl->encap_family;
	unsigned int sequence;
	struct km_event c;
	unsigned int pcpu_id;
	bool cached = false;

	/* We need the cpu id just as a lookup key,
	 * we don't require it to be stable.
	 */
	pcpu_id = get_cpu();
	put_cpu();

	to_put = NULL;

	sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);

	rcu_read_lock();
	hlist_for_each_entry_rcu(x, &pol->state_cache_list, state_cache) {
		if (x->props.family == encap_family &&
		    x->props.reqid == tmpl->reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    x->if_id == if_id &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
			xfrm_state_look_at(pol, x, fl, encap_family,
					   &best, &acquire_in_progress, &error);
	}

	if (best)
		goto cached;

	hlist_for_each_entry_rcu(x, &pol->state_cache_list, state_cache) {
		if (x->props.family == encap_family &&
		    x->props.reqid == tmpl->reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    x->if_id == if_id &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
			xfrm_state_look_at(pol, x, fl, family,
					   &best, &acquire_in_progress, &error);
	}

cached:
	cached = true;
	if (best)
		goto found;
	else if (error)
		best = NULL;
	else if (acquire_in_progress) /* XXX: acquire_in_progress should not happen */
		WARN_ON(1);

	xfrm_hash_ptrs_get(net, &state_ptrs);

	h = __xfrm_dst_hash(daddr, saddr, tmpl->reqid, encap_family, state_ptrs.hmask);
	hlist_for_each_entry_rcu(x, state_ptrs.bydst + h, bydst) {
#ifdef CONFIG_XFRM_OFFLOAD
		if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
			if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
				/* HW states are in the head of list, there is
				 * no need to iterate further.
				 */
				break;

			/* Packet offload: both policy and SA should
			 * have same device.
			 */
			if (pol->xdo.dev != x->xso.dev)
				continue;
		} else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
			/* Skip HW policy for SW lookups */
			continue;
#endif
		if (x->props.family == encap_family &&
		    x->props.reqid == tmpl->reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    x->if_id == if_id &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
			xfrm_state_look_at(pol, x, fl, family,
					   &best, &acquire_in_progress, &error);
	}
	if (best || acquire_in_progress)
		goto found;
	h_wildcard = __xfrm_dst_hash(daddr, &saddr_wildcard, tmpl->reqid,
				     encap_family, state_ptrs.hmask);
	hlist_for_each_entry_rcu(x, state_ptrs.bydst + h_wildcard, bydst) {
#ifdef CONFIG_XFRM_OFFLOAD
		if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
			if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
				/* HW states are in the head of list, there is
				 * no need to iterate further.
				 */
				break;

			/* Packet offload: both policy and SA should
			 * have same device.
			 */
			if (pol->xdo.dev != x->xso.dev)
				continue;
		} else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
			/* Skip HW policy for SW lookups */
			continue;
#endif
		if (x->props.family == encap_family &&
		    x->props.reqid == tmpl->reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    x->if_id == if_id &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
			xfrm_state_look_at(pol, x, fl, family,
					   &best, &acquire_in_progress, &error);
	}

found:
	if (!(pol->flags & XFRM_POLICY_CPU_ACQUIRE) ||
	    (best && (best->pcpu_num == pcpu_id)))
		x = best;

	if (!x && !error && !acquire_in_progress) {
		if (tmpl->id.spi &&
		    (x0 = __xfrm_state_lookup_all(&state_ptrs, mark, daddr,
						  tmpl->id.spi, tmpl->id.proto,
						  encap_family,
						  &pol->xdo)) != NULL) {
			to_put = x0;
			error = -EEXIST;
			goto out;
		}

		c.net = net;
		/* If the KMs have no listeners (yet...), avoid allocating an SA
		 * for each and every packet - garbage collection might not
		 * handle the flood.
		 */
		if (!km_is_alive(&c)) {
			error = -ESRCH;
			goto out;
		}

		x = xfrm_state_alloc(net);
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize temporary state matching only
		 * to current session. */
		xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
		memcpy(&x->mark, &pol->mark, sizeof(x->mark));
		x->if_id = if_id;
		if ((pol->flags & XFRM_POLICY_CPU_ACQUIRE) && best)
			x->pcpu_num = pcpu_id;

		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->flowi_secid);
		if (error) {
			x->km.state = XFRM_STATE_DEAD;
			to_put = x;
			x = NULL;
			goto out;
		}
#ifdef CONFIG_XFRM_OFFLOAD
		if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
			struct xfrm_dev_offload *xdo = &pol->xdo;
			struct xfrm_dev_offload *xso = &x->xso;

			xso->type = XFRM_DEV_OFFLOAD_PACKET;
			xso->dir = xdo->dir;
			xso->dev = xdo->dev;
			xso->real_dev = xdo->real_dev;
			xso->flags = XFRM_DEV_OFFLOAD_FLAG_ACQ;
			netdev_hold(xso->dev, &xso->dev_tracker, GFP_ATOMIC);
			error = xso->dev->xfrmdev_ops->xdo_dev_state_add(x, NULL);
			if (error) {
				xso->dir = 0;
				netdev_put(xso->dev, &xso->dev_tracker);
				xso->dev = NULL;
				xso->real_dev = NULL;
				xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
				x->km.state = XFRM_STATE_DEAD;
				to_put = x;
				x = NULL;
				goto out;
			}
		}
#endif
		if (km_query(x, tmpl, pol) == 0) {
			spin_lock_bh(&net->xfrm.xfrm_state_lock);
			x->km.state = XFRM_STATE_ACQ;
			x->dir = XFRM_SA_DIR_OUT;
			list_add(&x->km.all, &net->xfrm.state_all);
			h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
			XFRM_STATE_INSERT(bydst, &x->bydst,
					  net->xfrm.state_bydst + h,
					  x->xso.type);
			h = xfrm_src_hash(net, daddr, saddr, encap_family);
			XFRM_STATE_INSERT(bysrc, &x->bysrc,
					  net->xfrm.state_bysrc + h,
					  x->xso.type);
			INIT_HLIST_NODE(&x->state_cache);
			if (x->id.spi) {
				h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
				XFRM_STATE_INSERT(byspi, &x->byspi,
						  net->xfrm.state_byspi + h,
						  x->xso.type);
			}
			if (x->km.seq) {
				h = xfrm_seq_hash(net, x->km.seq);
				XFRM_STATE_INSERT(byseq, &x->byseq,
						  net->xfrm.state_byseq + h,
						  x->xso.type);
			}
			x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
			hrtimer_start(&x->mtimer,
				      ktime_set(net->xfrm.sysctl_acq_expires, 0),
				      HRTIMER_MODE_REL_SOFT);
			net->xfrm.state_num++;
			xfrm_hash_grow_check(net, x->bydst.next != NULL);
			spin_unlock_bh(&net->xfrm.xfrm_state_lock);
		} else {
#ifdef CONFIG_XFRM_OFFLOAD
			struct xfrm_dev_offload *xso = &x->xso;

			if (xso->type == XFRM_DEV_OFFLOAD_PACKET) {
				xfrm_dev_state_delete(x);
				xfrm_dev_state_free(x);
			}
#endif
			x->km.state = XFRM_STATE_DEAD;
			to_put = x;
			x = NULL;
			error = -ESRCH;
		}

		/* Use the already installed 'fallback' while the CPU-specific
		 * SA acquire is handled.
		 */
		if (best)
			x = best;
	}
out:
	if (x) {
		if (!xfrm_state_hold_rcu(x)) {
			*err = -EAGAIN;
			x = NULL;
		}
	} else {
		*err = acquire_in_progress ? -EAGAIN : error;
	}

	if (x && x->km.state == XFRM_STATE_VALID && !cached &&
	    (!(pol->flags & XFRM_POLICY_CPU_ACQUIRE) || x->pcpu_num == pcpu_id)) {
		spin_lock_bh(&net->xfrm.xfrm_state_lock);
		if (hlist_unhashed(&x->state_cache))
			hlist_add_head_rcu(&x->state_cache, &pol->state_cache_list);
		spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	}

	rcu_read_unlock();
	if (to_put)
		xfrm_state_put(to_put);

	if (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence)) {
		*err = -EAGAIN;
		if (x) {
			xfrm_state_put(x);
			x = NULL;
		}
	}

	return x;
}

struct xfrm_state *
xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
		    xfrm_address_t *daddr, xfrm_address_t *saddr,
		    unsigned short family, u8 mode, u8 proto, u32 reqid)
{
	unsigned int h;
	struct xfrm_state *rx = NULL, *x = NULL;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
	hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    x->if_id == if_id &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, family) &&
		    mode == x->props.mode &&
		    proto == x->id.proto &&
		    x->km.state == XFRM_STATE_VALID) {
			rx = x;
			break;
		}
	}

	if (rx)
		xfrm_state_hold(rx);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	return rx;
}
EXPORT_SYMBOL(xfrm_stateonly_find);

struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
					   unsigned short family)
{
	struct xfrm_state *x;
	struct xfrm_state_walk *w;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	list_for_each_entry(w, &net->xfrm.state_all, all) {
		x = container_of(w, struct xfrm_state, km);
		if (x->props.family != family ||
		    x->id.spi != spi)
			continue;

		xfrm_state_hold(x);
		spin_unlock_bh(&net->xfrm.xfrm_state_lock);
		return x;
	}
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	return NULL;
}
EXPORT_SYMBOL(xfrm_state_lookup_byspi);
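/*
 * Note (added commentary): unlike the hash based lookups above,
 * xfrm_state_lookup_byspi() walks the flat net->xfrm.state_all list and
 * matches on (family, spi) only, so it is a slow-path helper and not a
 * substitute for xfrm_state_lookup() on packet-processing paths.
 */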
static void __xfrm_state_insert(struct xfrm_state *x)
{
	struct net *net = xs_net(x);
	unsigned int h;

	list_add(&x->km.all, &net->xfrm.state_all);

	h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
			  x->props.reqid, x->props.family);
	XFRM_STATE_INSERT(bydst, &x->bydst, net->xfrm.state_bydst + h,
			  x->xso.type);

	h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family);
	XFRM_STATE_INSERT(bysrc, &x->bysrc, net->xfrm.state_bysrc + h,
			  x->xso.type);

	if (x->id.spi) {
		h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto,
				  x->props.family);

		XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h,
				  x->xso.type);
	}

	if (x->km.seq) {
		h = xfrm_seq_hash(net, x->km.seq);

		XFRM_STATE_INSERT(byseq, &x->byseq, net->xfrm.state_byseq + h,
				  x->xso.type);
	}

	hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
	if (x->replay_maxage)
		mod_timer(&x->rtimer, jiffies + x->replay_maxage);

	net->xfrm.state_num++;

	xfrm_hash_grow_check(net, x->bydst.next != NULL);
	xfrm_nat_keepalive_state_updated(x);
}

/* net->xfrm.xfrm_state_lock is held */
static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
{
	struct net *net = xs_net(xnew);
	unsigned short family = xnew->props.family;
	u32 reqid = xnew->props.reqid;
	struct xfrm_state *x;
	unsigned int h;
	u32 mark = xnew->mark.v & xnew->mark.m;
	u32 if_id = xnew->if_id;
	u32 cpu_id = xnew->pcpu_num;

	h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
	hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
		if (x->props.family == family &&
		    x->props.reqid == reqid &&
		    x->if_id == if_id &&
		    x->pcpu_num == cpu_id &&
		    (mark & x->mark.m) == x->mark.v &&
		    xfrm_addr_equal(&x->id.daddr, &xnew->id.daddr, family) &&
		    xfrm_addr_equal(&x->props.saddr, &xnew->props.saddr, family))
			x->genid++;
	}
}

void xfrm_state_insert(struct xfrm_state *x)
{
	struct net *net = xs_net(x);

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_insert);
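/*
 * Note (added commentary): bumping x->genid on every state that shares
 * (family, reqid, saddr, daddr, mark, if_id, pcpu_num) with the newly
 * inserted one invalidates cached bundles built on the old states, which
 * forces subsequent policy lookups to re-resolve and pick up the
 * replacement SA.
 */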
/* net->xfrm.xfrm_state_lock is held */
static struct xfrm_state *__find_acq_core(struct net *net,
					  const struct xfrm_mark *m,
					  unsigned short family, u8 mode,
					  u32 reqid, u32 if_id, u32 pcpu_num, u8 proto,
					  const xfrm_address_t *daddr,
					  const xfrm_address_t *saddr,
					  int create)
{
	unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
	struct xfrm_state *x;
	u32 mark = m->v & m->m;

	hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
		if (x->props.reqid != reqid ||
		    x->props.mode != mode ||
		    x->props.family != family ||
		    x->km.state != XFRM_STATE_ACQ ||
		    x->id.spi != 0 ||
		    x->id.proto != proto ||
		    (mark & x->mark.m) != x->mark.v ||
		    x->pcpu_num != pcpu_num ||
		    !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
		    !xfrm_addr_equal(&x->props.saddr, saddr, family))
			continue;

		xfrm_state_hold(x);
		return x;
	}

	if (!create)
		return NULL;

	x = xfrm_state_alloc(net);
	if (likely(x)) {
		switch (family) {
		case AF_INET:
			x->sel.daddr.a4 = daddr->a4;
			x->sel.saddr.a4 = saddr->a4;
			x->sel.prefixlen_d = 32;
			x->sel.prefixlen_s = 32;
			x->props.saddr.a4 = saddr->a4;
			x->id.daddr.a4 = daddr->a4;
			break;

		case AF_INET6:
			x->sel.daddr.in6 = daddr->in6;
			x->sel.saddr.in6 = saddr->in6;
			x->sel.prefixlen_d = 128;
			x->sel.prefixlen_s = 128;
			x->props.saddr.in6 = saddr->in6;
			x->id.daddr.in6 = daddr->in6;
			break;
		}

		x->pcpu_num = pcpu_num;
		x->km.state = XFRM_STATE_ACQ;
		x->id.proto = proto;
		x->props.family = family;
		x->props.mode = mode;
		x->props.reqid = reqid;
		x->if_id = if_id;
		x->mark.v = m->v;
		x->mark.m = m->m;
		x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
		xfrm_state_hold(x);
		hrtimer_start(&x->mtimer,
			      ktime_set(net->xfrm.sysctl_acq_expires, 0),
			      HRTIMER_MODE_REL_SOFT);
		list_add(&x->km.all, &net->xfrm.state_all);
		XFRM_STATE_INSERT(bydst, &x->bydst, net->xfrm.state_bydst + h,
				  x->xso.type);
		h = xfrm_src_hash(net, daddr, saddr, family);
		XFRM_STATE_INSERT(bysrc, &x->bysrc, net->xfrm.state_bysrc + h,
				  x->xso.type);

		net->xfrm.state_num++;

		xfrm_hash_grow_check(net, x->bydst.next != NULL);
	}

	return x;
}

static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num);

int xfrm_state_add(struct xfrm_state *x)
{
	struct net *net = xs_net(x);
	struct xfrm_state *x1, *to_put;
	int family;
	int err;
	u32 mark = x->mark.v & x->mark.m;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

	family = x->props.family;

	to_put = NULL;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);

	x1 = __xfrm_state_locate(x, use_spi, family);
	if (x1) {
		to_put = x1;
		x1 = NULL;
		err = -EEXIST;
		goto out;
	}

	if (use_spi && x->km.seq) {
		x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq, x->pcpu_num);
		if (x1 && ((x1->id.proto != x->id.proto) ||
		    !xfrm_addr_equal(&x1->id.daddr, &x->id.daddr, family))) {
			to_put = x1;
			x1 = NULL;
		}
	}

	if (use_spi && !x1)
		x1 = __find_acq_core(net, &x->mark, family, x->props.mode,
				     x->props.reqid, x->if_id, x->pcpu_num, x->id.proto,
				     &x->id.daddr, &x->props.saddr, 0);

	__xfrm_state_bump_genids(x);
	__xfrm_state_insert(x);
	err = 0;

out:
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	if (x1) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
	}

	if (to_put)
		xfrm_state_put(to_put);

	return err;
}
EXPORT_SYMBOL(xfrm_state_add);
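/*
 * Note (added commentary): xfrm_state_add() refuses to replace an
 * existing state (-EEXIST); it only absorbs a matching ACQ placeholder,
 * which is deleted once the real state has been inserted. Updating an
 * installed SA in place goes through xfrm_state_update() further below.
 */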
#ifdef CONFIG_XFRM_MIGRATE
static inline int clone_security(struct xfrm_state *x, struct xfrm_sec_ctx *security)
{
	struct xfrm_user_sec_ctx *uctx;
	int size = sizeof(*uctx) + security->ctx_len;
	int err;

	uctx = kmalloc(size, GFP_KERNEL);
	if (!uctx)
		return -ENOMEM;

	uctx->exttype = XFRMA_SEC_CTX;
	uctx->len = size;
	uctx->ctx_doi = security->ctx_doi;
	uctx->ctx_alg = security->ctx_alg;
	uctx->ctx_len = security->ctx_len;
	memcpy(uctx + 1, security->ctx_str, security->ctx_len);
	err = security_xfrm_state_alloc(x, uctx);
	kfree(uctx);
	if (err)
		return err;

	return 0;
}

static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
					   struct xfrm_encap_tmpl *encap)
{
	struct net *net = xs_net(orig);
	struct xfrm_state *x = xfrm_state_alloc(net);

	if (!x)
		goto out;

	memcpy(&x->id, &orig->id, sizeof(x->id));
	memcpy(&x->sel, &orig->sel, sizeof(x->sel));
	memcpy(&x->lft, &orig->lft, sizeof(x->lft));
	x->props.mode = orig->props.mode;
	x->props.replay_window = orig->props.replay_window;
	x->props.reqid = orig->props.reqid;
	x->props.family = orig->props.family;
	x->props.saddr = orig->props.saddr;

	if (orig->aalg) {
		x->aalg = xfrm_algo_auth_clone(orig->aalg);
		if (!x->aalg)
			goto error;
	}
	x->props.aalgo = orig->props.aalgo;

	if (orig->aead) {
		x->aead = xfrm_algo_aead_clone(orig->aead);
		x->geniv = orig->geniv;
		if (!x->aead)
			goto error;
	}
	if (orig->ealg) {
		x->ealg = xfrm_algo_clone(orig->ealg);
		if (!x->ealg)
			goto error;
	}
	x->props.ealgo = orig->props.ealgo;

	if (orig->calg) {
		x->calg = xfrm_algo_clone(orig->calg);
		if (!x->calg)
			goto error;
	}
	x->props.calgo = orig->props.calgo;

	if (encap || orig->encap) {
		if (encap)
			x->encap = kmemdup(encap, sizeof(*x->encap),
					   GFP_KERNEL);
		else
			x->encap = kmemdup(orig->encap, sizeof(*x->encap),
					   GFP_KERNEL);

		if (!x->encap)
			goto error;
	}

	if (orig->security)
		if (clone_security(x, orig->security))
			goto error;

	if (orig->coaddr) {
		x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
				    GFP_KERNEL);
		if (!x->coaddr)
			goto error;
	}

	if (orig->replay_esn) {
		if (xfrm_replay_clone(x, orig))
			goto error;
	}

	memcpy(&x->mark, &orig->mark, sizeof(x->mark));
	memcpy(&x->props.smark, &orig->props.smark, sizeof(x->props.smark));

	x->props.flags = orig->props.flags;
	x->props.extra_flags = orig->props.extra_flags;

	x->pcpu_num = orig->pcpu_num;
	x->if_id = orig->if_id;
	x->tfcpad = orig->tfcpad;
	x->replay_maxdiff = orig->replay_maxdiff;
	x->replay_maxage = orig->replay_maxage;
	memcpy(&x->curlft, &orig->curlft, sizeof(x->curlft));
	x->km.state = orig->km.state;
	x->km.seq = orig->km.seq;
	x->replay = orig->replay;
	x->preplay = orig->preplay;
	x->mapping_maxage = orig->mapping_maxage;
	x->lastused = orig->lastused;
	x->new_mapping = 0;
	x->new_mapping_sport = 0;
	x->dir = orig->dir;

	x->mode_cbs = orig->mode_cbs;
	if (x->mode_cbs && x->mode_cbs->clone_state) {
		if (x->mode_cbs->clone_state(x, orig))
			goto error;
	}

	return x;

error:
	xfrm_state_put(x);
out:
	return NULL;
}
static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
					   struct xfrm_encap_tmpl *encap)
{
	struct net *net = xs_net(orig);
	struct xfrm_state *x = xfrm_state_alloc(net);
	if (!x)
		goto out;

	memcpy(&x->id, &orig->id, sizeof(x->id));
	memcpy(&x->sel, &orig->sel, sizeof(x->sel));
	memcpy(&x->lft, &orig->lft, sizeof(x->lft));
	x->props.mode = orig->props.mode;
	x->props.replay_window = orig->props.replay_window;
	x->props.reqid = orig->props.reqid;
	x->props.family = orig->props.family;
	x->props.saddr = orig->props.saddr;

	if (orig->aalg) {
		x->aalg = xfrm_algo_auth_clone(orig->aalg);
		if (!x->aalg)
			goto error;
	}
	x->props.aalgo = orig->props.aalgo;

	if (orig->aead) {
		x->aead = xfrm_algo_aead_clone(orig->aead);
		x->geniv = orig->geniv;
		if (!x->aead)
			goto error;
	}
	if (orig->ealg) {
		x->ealg = xfrm_algo_clone(orig->ealg);
		if (!x->ealg)
			goto error;
	}
	x->props.ealgo = orig->props.ealgo;

	if (orig->calg) {
		x->calg = xfrm_algo_clone(orig->calg);
		if (!x->calg)
			goto error;
	}
	x->props.calgo = orig->props.calgo;

	if (encap || orig->encap) {
		if (encap)
			x->encap = kmemdup(encap, sizeof(*x->encap),
					   GFP_KERNEL);
		else
			x->encap = kmemdup(orig->encap, sizeof(*x->encap),
					   GFP_KERNEL);

		if (!x->encap)
			goto error;
	}

	if (orig->security)
		if (clone_security(x, orig->security))
			goto error;

	if (orig->coaddr) {
		x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
				    GFP_KERNEL);
		if (!x->coaddr)
			goto error;
	}

	if (orig->replay_esn) {
		if (xfrm_replay_clone(x, orig))
			goto error;
	}

	memcpy(&x->mark, &orig->mark, sizeof(x->mark));
	memcpy(&x->props.smark, &orig->props.smark, sizeof(x->props.smark));

	x->props.flags = orig->props.flags;
	x->props.extra_flags = orig->props.extra_flags;

	x->pcpu_num = orig->pcpu_num;
	x->if_id = orig->if_id;
	x->tfcpad = orig->tfcpad;
	x->replay_maxdiff = orig->replay_maxdiff;
	x->replay_maxage = orig->replay_maxage;
	memcpy(&x->curlft, &orig->curlft, sizeof(x->curlft));
	x->km.state = orig->km.state;
	x->km.seq = orig->km.seq;
	x->replay = orig->replay;
	x->preplay = orig->preplay;
	x->mapping_maxage = orig->mapping_maxage;
	x->lastused = orig->lastused;
	x->new_mapping = 0;
	x->new_mapping_sport = 0;
	x->dir = orig->dir;

	x->mode_cbs = orig->mode_cbs;
	if (x->mode_cbs && x->mode_cbs->clone_state) {
		if (x->mode_cbs->clone_state(x, orig))
			goto error;
	}

	return x;

error:
	xfrm_state_put(x);
out:
	return NULL;
}

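/* Find the SA to be migrated, keyed by its old addresses.  With a
 * reqid the bydst hash is scanned, otherwise the bysrc hash.  The
 * state is returned held; the caller must drop the reference.
 */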
struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net,
					   u32 if_id)
{
	unsigned int h;
	struct xfrm_state *x = NULL;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);

	if (m->reqid) {
		h = xfrm_dst_hash(net, &m->old_daddr, &m->old_saddr,
				  m->reqid, m->old_family);
		hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (m->reqid && x->props.reqid != m->reqid)
				continue;
			if (if_id != 0 && x->if_id != if_id)
				continue;
			if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
					     m->old_family) ||
			    !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
					     m->old_family))
				continue;
			xfrm_state_hold(x);
			break;
		}
	} else {
		h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr,
				  m->old_family);
		hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) {
			if (x->props.mode != m->mode ||
			    x->id.proto != m->proto)
				continue;
			if (if_id != 0 && x->if_id != if_id)
				continue;
			if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
					     m->old_family) ||
			    !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
					     m->old_family))
				continue;
			xfrm_state_hold(x);
			break;
		}
	}

	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	return x;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);

struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
				      struct xfrm_migrate *m,
				      struct xfrm_encap_tmpl *encap)
{
	struct xfrm_state *xc;

	xc = xfrm_state_clone(x, encap);
	if (!xc)
		return NULL;

	xc->props.family = m->new_family;

	if (xfrm_init_state(xc) < 0)
		goto error;

	memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
	memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));

	/* add state */
	if (xfrm_addr_equal(&x->id.daddr, &m->new_daddr, m->new_family)) {
		/* care is needed when the destination address of the
		   state is to be updated as it is part of the triplet */
		xfrm_state_insert(xc);
	} else {
		if (xfrm_state_add(xc) < 0)
			goto error;
	}

	return xc;
error:
	xfrm_state_put(xc);
	return NULL;
}
EXPORT_SYMBOL(xfrm_state_migrate);
#endif

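/* Update an existing SA in place.  An ACQ entry is simply replaced by
 * @x.  A VALID entry instead absorbs @x's encap, care-of address,
 * selector and lifetime, and @x itself is marked DEAD and released.
 */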
int xfrm_state_update(struct xfrm_state *x)
{
	struct xfrm_state *x1, *to_put;
	int err;
	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
	struct net *net = xs_net(x);

	to_put = NULL;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	x1 = __xfrm_state_locate(x, use_spi, x->props.family);

	err = -ESRCH;
	if (!x1)
		goto out;

	if (xfrm_state_kern(x1)) {
		to_put = x1;
		err = -EEXIST;
		goto out;
	}

	if (x1->km.state == XFRM_STATE_ACQ) {
		if (x->dir && x1->dir != x->dir)
			goto out;

		__xfrm_state_insert(x);
		x = NULL;
	} else {
		if (x1->dir != x->dir)
			goto out;
	}
	err = 0;

out:
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	if (to_put)
		xfrm_state_put(to_put);

	if (err)
		return err;

	if (!x) {
		xfrm_state_delete(x1);
		xfrm_state_put(x1);
		return 0;
	}

	err = -EINVAL;
	spin_lock_bh(&x1->lock);
	if (likely(x1->km.state == XFRM_STATE_VALID)) {
		if (x->encap && x1->encap &&
		    x->encap->encap_type == x1->encap->encap_type)
			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
		else if (x->encap || x1->encap)
			goto fail;

		if (x->coaddr && x1->coaddr) {
			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
		}
		if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
			memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
		x1->km.dying = 0;

		hrtimer_start(&x1->mtimer, ktime_set(1, 0),
			      HRTIMER_MODE_REL_SOFT);
		if (READ_ONCE(x1->curlft.use_time))
			xfrm_state_check_expire(x1);

		if (x->props.smark.m || x->props.smark.v || x->if_id) {
			spin_lock_bh(&net->xfrm.xfrm_state_lock);

			if (x->props.smark.m || x->props.smark.v)
				x1->props.smark = x->props.smark;

			if (x->if_id)
				x1->if_id = x->if_id;

			__xfrm_state_bump_genids(x1);
			spin_unlock_bh(&net->xfrm.xfrm_state_lock);
		}

		err = 0;
		x->km.state = XFRM_STATE_DEAD;
		__xfrm_state_put(x);
	}

fail:
	spin_unlock_bh(&x1->lock);

	xfrm_state_put(x1);

	return err;
}
EXPORT_SYMBOL(xfrm_state_update);

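/* Enforce lifetime limits: exceeding a hard byte/packet limit expires
 * the state immediately (its timer is kicked to deliver the event),
 * while a soft limit only marks it dying and notifies the key manager
 * once.
 */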
int xfrm_state_check_expire(struct xfrm_state *x)
{
	xfrm_dev_state_update_stats(x);

	if (!READ_ONCE(x->curlft.use_time))
		WRITE_ONCE(x->curlft.use_time, ktime_get_real_seconds());

	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
	    x->curlft.packets >= x->lft.hard_packet_limit) {
		x->km.state = XFRM_STATE_EXPIRED;
		hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL_SOFT);
		return -EINVAL;
	}

	if (!x->km.dying &&
	    (x->curlft.bytes >= x->lft.soft_byte_limit ||
	     x->curlft.packets >= x->lft.soft_packet_limit)) {
		x->km.dying = 1;
		km_state_expired(x, 0, 0);
	}
	return 0;
}
EXPORT_SYMBOL(xfrm_state_check_expire);

void xfrm_state_update_stats(struct net *net)
{
	struct xfrm_state *x;
	int i;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	for (i = 0; i <= net->xfrm.state_hmask; i++) {
		hlist_for_each_entry(x, net->xfrm.state_bydst + i, bydst)
			xfrm_dev_state_update_stats(x);
	}
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}

struct xfrm_state *
xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
		  u8 proto, unsigned short family)
{
	struct xfrm_hash_state_ptrs state_ptrs;
	struct xfrm_state *x;

	rcu_read_lock();
	xfrm_hash_ptrs_get(net, &state_ptrs);

	x = __xfrm_state_lookup(&state_ptrs, mark, daddr, spi, proto, family);
	rcu_read_unlock();
	return x;
}
EXPORT_SYMBOL(xfrm_state_lookup);

struct xfrm_state *
xfrm_state_lookup_byaddr(struct net *net, u32 mark,
			 const xfrm_address_t *daddr, const xfrm_address_t *saddr,
			 u8 proto, unsigned short family)
{
	struct xfrm_hash_state_ptrs state_ptrs;
	struct xfrm_state *x;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);

	xfrm_hash_ptrs_get(net, &state_ptrs);

	x = __xfrm_state_lookup_byaddr(&state_ptrs, mark, daddr, saddr, proto, family);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_state_lookup_byaddr);

struct xfrm_state *
xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid,
	      u32 if_id, u32 pcpu_num, u8 proto, const xfrm_address_t *daddr,
	      const xfrm_address_t *saddr, int create, unsigned short family)
{
	struct xfrm_state *x;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	x = __find_acq_core(net, mark, family, mode, reqid, if_id, pcpu_num,
			    proto, daddr, saddr, create);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	return x;
}
EXPORT_SYMBOL(xfrm_find_acq);

#ifdef CONFIG_XFRM_SUB_POLICY
#if IS_ENABLED(CONFIG_IPV6)
/* distribution counting sort function for xfrm_state and xfrm_tmpl */
static void
__xfrm6_sort(void **dst, void **src, int n,
	     int (*cmp)(const void *p), int maxclass)
{
	int count[XFRM_MAX_DEPTH] = { };
	int class[XFRM_MAX_DEPTH];
	int i;

	for (i = 0; i < n; i++) {
		int c = cmp(src[i]);

		class[i] = c;
		count[c]++;
	}

	for (i = 2; i < maxclass; i++)
		count[i] += count[i - 1];

	for (i = 0; i < n; i++) {
		dst[count[class[i] - 1]++] = src[i];
		src[i] = NULL;
	}
}

/* Rule for xfrm_state:
 *
 * rule 1: select IPsec transport except AH
 * rule 2: select MIPv6 RO or inbound trigger
 * rule 3: select IPsec transport AH
 * rule 4: select IPsec tunnel
 * rule 5: others
 */
static int __xfrm6_state_sort_cmp(const void *p)
{
	const struct xfrm_state *v = p;

	switch (v->props.mode) {
	case XFRM_MODE_TRANSPORT:
		if (v->id.proto != IPPROTO_AH)
			return 1;
		else
			return 3;
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	case XFRM_MODE_ROUTEOPTIMIZATION:
	case XFRM_MODE_IN_TRIGGER:
		return 2;
#endif
	case XFRM_MODE_TUNNEL:
	case XFRM_MODE_BEET:
	case XFRM_MODE_IPTFS:
		return 4;
	}
	return 5;
}

/* Rule for xfrm_tmpl:
 *
 * rule 1: select IPsec transport
 * rule 2: select MIPv6 RO or inbound trigger
 * rule 3: select IPsec tunnel
 * rule 4: others
 */
static int __xfrm6_tmpl_sort_cmp(const void *p)
{
	const struct xfrm_tmpl *v = p;

	switch (v->mode) {
	case XFRM_MODE_TRANSPORT:
		return 1;
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	case XFRM_MODE_ROUTEOPTIMIZATION:
	case XFRM_MODE_IN_TRIGGER:
		return 2;
#endif
	case XFRM_MODE_TUNNEL:
	case XFRM_MODE_BEET:
	case XFRM_MODE_IPTFS:
		return 3;
	}
	return 4;
}
#else
static inline int __xfrm6_state_sort_cmp(const void *p) { return 5; }
static inline int __xfrm6_tmpl_sort_cmp(const void *p) { return 4; }

static inline void
__xfrm6_sort(void **dst, void **src, int n,
	     int (*cmp)(const void *p), int maxclass)
{
	int i;

	for (i = 0; i < n; i++)
		dst[i] = src[i];
}
#endif /* CONFIG_IPV6 */

void
xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
	       unsigned short family)
{
	int i;

	if (family == AF_INET6)
		__xfrm6_sort((void **)dst, (void **)src, n,
			     __xfrm6_tmpl_sort_cmp, 5);
	else
		for (i = 0; i < n; i++)
			dst[i] = src[i];
}

void
xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
		unsigned short family)
{
	int i;

	if (family == AF_INET6)
		__xfrm6_sort((void **)dst, (void **)src, n,
			     __xfrm6_state_sort_cmp, 6);
	else
		for (i = 0; i < n; i++)
			dst[i] = src[i];
}
#endif

/* Silly enough, but I'm too lazy to build a resolution list */

static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num)
{
	unsigned int h = xfrm_seq_hash(net, seq);
	struct xfrm_state *x;

	hlist_for_each_entry_rcu(x, net->xfrm.state_byseq + h, byseq) {
		if (x->km.seq == seq &&
		    (mark & x->mark.m) == x->mark.v &&
		    x->pcpu_num == pcpu_num &&
		    x->km.state == XFRM_STATE_ACQ) {
			xfrm_state_hold(x);
			return x;
		}
	}

	return NULL;
}

struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num)
{
	struct xfrm_state *x;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	x = __xfrm_find_acq_byseq(net, mark, seq, pcpu_num);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	return x;
}
EXPORT_SYMBOL(xfrm_find_acq_byseq);

u32 xfrm_get_acqseq(void)
{
	u32 res;
	static atomic_t acqseq;

	do {
		res = atomic_inc_return(&acqseq);
	} while (!res);

	return res;
}
EXPORT_SYMBOL(xfrm_get_acqseq);

int verify_spi_info(u8 proto, u32 min, u32 max, struct netlink_ext_ack *extack)
{
	switch (proto) {
	case IPPROTO_AH:
	case IPPROTO_ESP:
		break;

	case IPPROTO_COMP:
		/* IPCOMP spi is 16 bits. */
		if (max >= 0x10000) {
			NL_SET_ERR_MSG(extack, "IPCOMP SPI must be <= 65535");
			return -EINVAL;
		}
		break;

	default:
		NL_SET_ERR_MSG(extack, "Invalid protocol, must be one of AH, ESP, IPCOMP");
		return -EINVAL;
	}

	if (min > max) {
		NL_SET_ERR_MSG(extack, "Invalid SPI range: min > max");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(verify_spi_info);

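/* Allocate an SPI for an ACQUIRE state.  When low == high the exact
 * value is probed; otherwise up to high - low + 1 random values from
 * the range are tried.  On success the state is hashed into byspi.
 */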
int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high,
		   struct netlink_ext_ack *extack)
{
	struct net *net = xs_net(x);
	unsigned int h;
	struct xfrm_state *x0;
	int err = -ENOENT;
	__be32 minspi = htonl(low);
	__be32 maxspi = htonl(high);
	__be32 newspi = 0;
	u32 mark = x->mark.v & x->mark.m;

	spin_lock_bh(&x->lock);
	if (x->km.state == XFRM_STATE_DEAD) {
		NL_SET_ERR_MSG(extack, "Target ACQUIRE is in DEAD state");
		goto unlock;
	}

	err = 0;
	if (x->id.spi)
		goto unlock;

	err = -ENOENT;

	if (minspi == maxspi) {
		x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family);
		if (x0) {
			NL_SET_ERR_MSG(extack, "Requested SPI is already in use");
			xfrm_state_put(x0);
			goto unlock;
		}
		newspi = minspi;
	} else {
		u32 spi = 0;
		for (h = 0; h < high-low+1; h++) {
			spi = get_random_u32_inclusive(low, high);
			x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
			if (x0 == NULL) {
				newspi = htonl(spi);
				break;
			}
			xfrm_state_put(x0);
		}
	}
	if (newspi) {
		spin_lock_bh(&net->xfrm.xfrm_state_lock);
		x->id.spi = newspi;
		h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
		XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h,
				  x->xso.type);
		spin_unlock_bh(&net->xfrm.xfrm_state_lock);

		err = 0;
	} else {
		NL_SET_ERR_MSG(extack, "No SPI available in the requested range");
	}

unlock:
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_alloc_spi);

static bool __xfrm_state_filter_match(struct xfrm_state *x,
				      struct xfrm_address_filter *filter)
{
	if (filter) {
		if ((filter->family == AF_INET ||
		     filter->family == AF_INET6) &&
		    x->props.family != filter->family)
			return false;

		return addr_match(&x->props.saddr, &filter->saddr,
				  filter->splen) &&
		       addr_match(&x->id.daddr, &filter->daddr,
				  filter->dplen);
	}
	return true;
}

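/* Resumable dump over state_all: @func is called for every live SA
 * matching the walk's proto and address filter.  If @func fails, the
 * walker is parked in front of the offending entry so that the next
 * call resumes (and retries) from there.
 */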
int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
		    int (*func)(struct xfrm_state *, int, void*),
		    void *data)
{
	struct xfrm_state *state;
	struct xfrm_state_walk *x;
	int err = 0;

	if (walk->seq != 0 && list_empty(&walk->all))
		return 0;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	if (list_empty(&walk->all))
		x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all);
	else
		x = list_first_entry(&walk->all, struct xfrm_state_walk, all);
	list_for_each_entry_from(x, &net->xfrm.state_all, all) {
		if (x->state == XFRM_STATE_DEAD)
			continue;
		state = container_of(x, struct xfrm_state, km);
		if (!xfrm_id_proto_match(state->id.proto, walk->proto))
			continue;
		if (!__xfrm_state_filter_match(state, walk->filter))
			continue;
		err = func(state, walk->seq, data);
		if (err) {
			list_move_tail(&walk->all, &x->all);
			goto out;
		}
		walk->seq++;
	}
	if (walk->seq == 0) {
		err = -ENOENT;
		goto out;
	}
	list_del_init(&walk->all);
out:
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_walk);

void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
			  struct xfrm_address_filter *filter)
{
	INIT_LIST_HEAD(&walk->all);
	walk->proto = proto;
	walk->state = XFRM_STATE_DEAD;
	walk->seq = 0;
	walk->filter = filter;
}
EXPORT_SYMBOL(xfrm_state_walk_init);

void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net)
{
	kfree(walk->filter);

	if (list_empty(&walk->all))
		return;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	list_del(&walk->all);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_walk_done);

static void xfrm_replay_timer_handler(struct timer_list *t)
{
	struct xfrm_state *x = from_timer(x, t, rtimer);

	spin_lock(&x->lock);

	if (x->km.state == XFRM_STATE_VALID) {
		if (xfrm_aevent_is_on(xs_net(x)))
			xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
		else
			x->xflags |= XFRM_TIME_DEFER;
	}

	spin_unlock(&x->lock);
}

static LIST_HEAD(xfrm_km_list);

void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
	struct xfrm_mgr *km;

	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list)
		if (km->notify_policy)
			km->notify_policy(xp, dir, c);
	rcu_read_unlock();
}

void km_state_notify(struct xfrm_state *x, const struct km_event *c)
{
	struct xfrm_mgr *km;
	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list)
		if (km->notify)
			km->notify(x, c);
	rcu_read_unlock();
}

EXPORT_SYMBOL(km_policy_notify);
EXPORT_SYMBOL(km_state_notify);

void km_state_expired(struct xfrm_state *x, int hard, u32 portid)
{
	struct km_event c;

	c.data.hard = hard;
	c.portid = portid;
	c.event = XFRM_MSG_EXPIRE;
	km_state_notify(x, &c);
}

EXPORT_SYMBOL(km_state_expired);
/*
 * We send to all registered managers regardless of failure
 * We are happy with one success
 */
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
{
	int err = -EINVAL, acqret;
	struct xfrm_mgr *km;

	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
		acqret = km->acquire(x, t, pol);
		if (!acqret)
			err = acqret;
	}
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(km_query);

static int __km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
{
	int err = -EINVAL;
	struct xfrm_mgr *km;

	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
		if (km->new_mapping)
			err = km->new_mapping(x, ipaddr, sport);
		if (!err)
			break;
	}
	rcu_read_unlock();
	return err;
}

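/* Report a changed NAT mapping to the key managers.  With
 * mapping_maxage set, repeated reports for the same source port are
 * rate-limited to one per mapping_maxage seconds.
 */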
int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
{
	int ret = 0;

	if (x->mapping_maxage) {
		if ((jiffies / HZ - x->new_mapping) > x->mapping_maxage ||
		    x->new_mapping_sport != sport) {
			x->new_mapping_sport = sport;
			x->new_mapping = jiffies / HZ;
			ret = __km_new_mapping(x, ipaddr, sport);
		}
	} else {
		ret = __km_new_mapping(x, ipaddr, sport);
	}

	return ret;
}
EXPORT_SYMBOL(km_new_mapping);

void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid)
{
	struct km_event c;

	c.data.hard = hard;
	c.portid = portid;
	c.event = XFRM_MSG_POLEXPIRE;
	km_policy_notify(pol, dir, &c);
}
EXPORT_SYMBOL(km_policy_expired);

#ifdef CONFIG_XFRM_MIGRATE
int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
	       const struct xfrm_migrate *m, int num_migrate,
	       const struct xfrm_kmaddress *k,
	       const struct xfrm_encap_tmpl *encap)
{
	int err = -EINVAL;
	int ret;
	struct xfrm_mgr *km;

	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
		if (km->migrate) {
			ret = km->migrate(sel, dir, type, m, num_migrate, k,
					  encap);
			if (!ret)
				err = ret;
		}
	}
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(km_migrate);
#endif

int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
{
	int err = -EINVAL;
	int ret;
	struct xfrm_mgr *km;

	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
		if (km->report) {
			ret = km->report(net, proto, sel, addr);
			if (!ret)
				err = ret;
		}
	}
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(km_report);

static bool km_is_alive(const struct km_event *c)
{
	struct xfrm_mgr *km;
	bool is_alive = false;

	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
		if (km->is_alive && km->is_alive(c)) {
			is_alive = true;
			break;
		}
	}
	rcu_read_unlock();

	return is_alive;
}

#if IS_ENABLED(CONFIG_XFRM_USER_COMPAT)
static DEFINE_SPINLOCK(xfrm_translator_lock);
static struct xfrm_translator __rcu *xfrm_translator;

struct xfrm_translator *xfrm_get_translator(void)
{
	struct xfrm_translator *xtr;

	rcu_read_lock();
	xtr = rcu_dereference(xfrm_translator);
	if (unlikely(!xtr))
		goto out;
	if (!try_module_get(xtr->owner))
		xtr = NULL;
out:
	rcu_read_unlock();
	return xtr;
}
EXPORT_SYMBOL_GPL(xfrm_get_translator);

void xfrm_put_translator(struct xfrm_translator *xtr)
{
	module_put(xtr->owner);
}
EXPORT_SYMBOL_GPL(xfrm_put_translator);

int xfrm_register_translator(struct xfrm_translator *xtr)
{
	int err = 0;

	spin_lock_bh(&xfrm_translator_lock);
	if (unlikely(xfrm_translator != NULL))
		err = -EEXIST;
	else
		rcu_assign_pointer(xfrm_translator, xtr);
	spin_unlock_bh(&xfrm_translator_lock);

	return err;
}
EXPORT_SYMBOL_GPL(xfrm_register_translator);

int xfrm_unregister_translator(struct xfrm_translator *xtr)
{
	int err = 0;

	spin_lock_bh(&xfrm_translator_lock);
	if (likely(xfrm_translator != NULL)) {
		if (rcu_access_pointer(xfrm_translator) != xtr)
			err = -EINVAL;
		else
			RCU_INIT_POINTER(xfrm_translator, NULL);
	}
	spin_unlock_bh(&xfrm_translator_lock);
	synchronize_rcu();

	return err;
}
EXPORT_SYMBOL_GPL(xfrm_unregister_translator);
#endif

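/* setsockopt() handler for the per-socket policy options
 * (IP_XFRM_POLICY/IPV6_XFRM_POLICY).  A NULL optval detaches both
 * directions; otherwise the blob is translated from the 32-bit compat
 * layout if necessary and offered to each key manager until one
 * compiles it into a policy.
 */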
int xfrm_user_policy(struct sock *sk, int optname,
		     sockptr_t optval, int optlen)
{
	int err;
	u8 *data;
	struct xfrm_mgr *km;
	struct xfrm_policy *pol = NULL;

	if (sockptr_is_null(optval) && !optlen) {
		xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
		xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
		__sk_dst_reset(sk);
		return 0;
	}

	if (optlen <= 0 || optlen > PAGE_SIZE)
		return -EMSGSIZE;

	data = memdup_sockptr(optval, optlen);
	if (IS_ERR(data))
		return PTR_ERR(data);

	if (in_compat_syscall()) {
		struct xfrm_translator *xtr = xfrm_get_translator();

		if (!xtr) {
			kfree(data);
			return -EOPNOTSUPP;
		}

		err = xtr->xlate_user_policy_sockptr(&data, optlen);
		xfrm_put_translator(xtr);
		if (err) {
			kfree(data);
			return err;
		}
	}

	err = -EINVAL;
	rcu_read_lock();
	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
		pol = km->compile_policy(sk, optname, data,
					 optlen, &err);
		if (err >= 0)
			break;
	}
	rcu_read_unlock();

	if (err >= 0) {
		xfrm_sk_policy_insert(sk, err, pol);
		xfrm_pol_put(pol);
		__sk_dst_reset(sk);
		err = 0;
	}

	kfree(data);
	return err;
}
EXPORT_SYMBOL(xfrm_user_policy);

static DEFINE_SPINLOCK(xfrm_km_lock);

void xfrm_register_km(struct xfrm_mgr *km)
{
	spin_lock_bh(&xfrm_km_lock);
	list_add_tail_rcu(&km->list, &xfrm_km_list);
	spin_unlock_bh(&xfrm_km_lock);
}
EXPORT_SYMBOL(xfrm_register_km);

void xfrm_unregister_km(struct xfrm_mgr *km)
{
	spin_lock_bh(&xfrm_km_lock);
	list_del_rcu(&km->list);
	spin_unlock_bh(&xfrm_km_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL(xfrm_unregister_km);

int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0;

	if (WARN_ON(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_afinfo_lock);
	if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
		err = -EEXIST;
	else
		rcu_assign_pointer(xfrm_state_afinfo[afinfo->family], afinfo);
	spin_unlock_bh(&xfrm_state_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_state_register_afinfo);

int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
{
	int err = 0, family = afinfo->family;

	if (WARN_ON(family >= NPROTO))
		return -EAFNOSUPPORT;

	spin_lock_bh(&xfrm_state_afinfo_lock);
	if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
		if (rcu_access_pointer(xfrm_state_afinfo[family]) != afinfo)
			err = -EINVAL;
		else
			RCU_INIT_POINTER(xfrm_state_afinfo[afinfo->family], NULL);
	}
	spin_unlock_bh(&xfrm_state_afinfo_lock);
	synchronize_rcu();
	return err;
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);

struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family)
{
	if (unlikely(family >= NPROTO))
		return NULL;

	return rcu_dereference(xfrm_state_afinfo[family]);
}
EXPORT_SYMBOL_GPL(xfrm_state_afinfo_get_rcu);

struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
{
	struct xfrm_state_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	rcu_read_lock();
	afinfo = rcu_dereference(xfrm_state_afinfo[family]);
	if (unlikely(!afinfo))
		rcu_read_unlock();
	return afinfo;
}

void xfrm_flush_gc(void)
{
	flush_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(xfrm_flush_gc);

/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
	if (x->tunnel) {
		struct xfrm_state *t = x->tunnel;

		if (atomic_read(&t->tunnel_users) == 2)
			xfrm_state_delete(t);
		atomic_dec(&t->tunnel_users);
		xfrm_state_put_sync(t);
		x->tunnel = NULL;
	}
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);

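/* Return the largest payload that still fits in @mtu once ESP
 * overhead is added: subtract header and ICV, round down to the
 * cipher block size, then drop the two trailer bytes (pad length and
 * next header).  net_adj re-adds the IP header that transport and
 * BEET modes keep.
 */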
u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
	const struct xfrm_type *type = READ_ONCE(x->type);
	struct crypto_aead *aead;
	u32 blksize, net_adj = 0;

	if (x->km.state != XFRM_STATE_VALID ||
	    !type || type->proto != IPPROTO_ESP)
		return mtu - x->props.header_len;

	aead = x->data;
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);

	switch (x->props.mode) {
	case XFRM_MODE_TRANSPORT:
	case XFRM_MODE_BEET:
		if (x->props.family == AF_INET)
			net_adj = sizeof(struct iphdr);
		else if (x->props.family == AF_INET6)
			net_adj = sizeof(struct ipv6hdr);
		break;
	case XFRM_MODE_TUNNEL:
		break;
	default:
		if (x->mode_cbs && x->mode_cbs->get_inner_mtu)
			return x->mode_cbs->get_inner_mtu(x, mtu);

		WARN_ON_ONCE(1);
		break;
	}

	return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
		 net_adj) & ~(blksize - 1)) + net_adj - 2;
}
EXPORT_SYMBOL_GPL(xfrm_state_mtu);

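/* Resolve the SA's type and inner/outer mode ops from its proto, mode
 * and family, then run type- and mode-specific init.  For an
 * AF_UNSPEC selector the interfamily inner mode is also set up so a
 * tunnel SA can carry the other address family.
 */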
int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload,
		      struct netlink_ext_ack *extack)
{
	const struct xfrm_mode *inner_mode;
	const struct xfrm_mode *outer_mode;
	int family = x->props.family;
	int err;

	if (family == AF_INET &&
	    READ_ONCE(xs_net(x)->ipv4.sysctl_ip_no_pmtu_disc))
		x->props.flags |= XFRM_STATE_NOPMTUDISC;

	err = -EPROTONOSUPPORT;

	if (x->sel.family != AF_UNSPEC) {
		inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
		if (inner_mode == NULL) {
			NL_SET_ERR_MSG(extack, "Requested mode not found");
			goto error;
		}

		if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
		    family != x->sel.family) {
			NL_SET_ERR_MSG(extack, "Only tunnel modes can accommodate a change of family");
			goto error;
		}

		x->inner_mode = *inner_mode;
	} else {
		const struct xfrm_mode *inner_mode_iaf;
		int iafamily = AF_INET;

		inner_mode = xfrm_get_mode(x->props.mode, x->props.family);
		if (inner_mode == NULL) {
			NL_SET_ERR_MSG(extack, "Requested mode not found");
			goto error;
		}

		x->inner_mode = *inner_mode;

		if (x->props.family == AF_INET)
			iafamily = AF_INET6;

		inner_mode_iaf = xfrm_get_mode(x->props.mode, iafamily);
		if (inner_mode_iaf) {
			if (inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)
				x->inner_mode_iaf = *inner_mode_iaf;
		}
	}

	x->type = xfrm_get_type(x->id.proto, family);
	if (x->type == NULL) {
		NL_SET_ERR_MSG(extack, "Requested type not found");
		goto error;
	}

	x->type_offload = xfrm_get_type_offload(x->id.proto, family, offload);

	err = x->type->init_state(x, extack);
	if (err)
		goto error;

	outer_mode = xfrm_get_mode(x->props.mode, family);
	if (!outer_mode) {
		NL_SET_ERR_MSG(extack, "Requested mode not found");
		err = -EPROTONOSUPPORT;
		goto error;
	}

	x->outer_mode = *outer_mode;
	if (init_replay) {
		err = xfrm_init_replay(x, extack);
		if (err)
			goto error;
	}

	if (x->nat_keepalive_interval) {
		if (x->dir != XFRM_SA_DIR_OUT) {
			NL_SET_ERR_MSG(extack, "NAT keepalive is only supported for outbound SAs");
			err = -EINVAL;
			goto error;
		}

		if (!x->encap || x->encap->encap_type != UDP_ENCAP_ESPINUDP) {
			NL_SET_ERR_MSG(extack,
				       "NAT keepalive is only supported for UDP encapsulation");
			err = -EINVAL;
			goto error;
		}
	}

	x->mode_cbs = xfrm_get_mode_cbs(x->props.mode);
	if (x->mode_cbs) {
		if (x->mode_cbs->init_state)
			err = x->mode_cbs->init_state(x);
		module_put(x->mode_cbs->owner);
	}
error:
	return err;
}

EXPORT_SYMBOL(__xfrm_init_state);

int xfrm_init_state(struct xfrm_state *x)
{
	int err;

	err = __xfrm_init_state(x, true, false, NULL);
	if (!err)
		x->km.state = XFRM_STATE_VALID;

	return err;
}

EXPORT_SYMBOL(xfrm_init_state);

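/* Per-netns initialisation: the four state hash tables start with
 * eight buckets each (grown later by xfrm_hash_resize) plus the
 * per-cpu input state cache; xfrm_state_fini tears them down again.
 */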
int __net_init xfrm_state_init(struct net *net)
{
	unsigned int sz;

	if (net_eq(net, &init_net))
		xfrm_state_cache = KMEM_CACHE(xfrm_state,
					      SLAB_HWCACHE_ALIGN | SLAB_PANIC);

	INIT_LIST_HEAD(&net->xfrm.state_all);

	sz = sizeof(struct hlist_head) * 8;

	net->xfrm.state_bydst = xfrm_hash_alloc(sz);
	if (!net->xfrm.state_bydst)
		goto out_bydst;
	net->xfrm.state_bysrc = xfrm_hash_alloc(sz);
	if (!net->xfrm.state_bysrc)
		goto out_bysrc;
	net->xfrm.state_byspi = xfrm_hash_alloc(sz);
	if (!net->xfrm.state_byspi)
		goto out_byspi;
	net->xfrm.state_byseq = xfrm_hash_alloc(sz);
	if (!net->xfrm.state_byseq)
		goto out_byseq;

	net->xfrm.state_cache_input = alloc_percpu(struct hlist_head);
	if (!net->xfrm.state_cache_input)
		goto out_state_cache_input;

	net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1);

	net->xfrm.state_num = 0;
	INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
	spin_lock_init(&net->xfrm.xfrm_state_lock);
	seqcount_spinlock_init(&net->xfrm.xfrm_state_hash_generation,
			       &net->xfrm.xfrm_state_lock);
	return 0;

out_state_cache_input:
	xfrm_hash_free(net->xfrm.state_byseq, sz);
out_byseq:
	xfrm_hash_free(net->xfrm.state_byspi, sz);
out_byspi:
	xfrm_hash_free(net->xfrm.state_bysrc, sz);
out_bysrc:
	xfrm_hash_free(net->xfrm.state_bydst, sz);
out_bydst:
	return -ENOMEM;
}

void xfrm_state_fini(struct net *net)
{
	unsigned int sz;

	flush_work(&net->xfrm.state_hash_work);
	flush_work(&xfrm_state_gc_work);
	xfrm_state_flush(net, 0, false, true);

	WARN_ON(!list_empty(&net->xfrm.state_all));

	sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.state_byseq));
	xfrm_hash_free(net->xfrm.state_byseq, sz);
	WARN_ON(!hlist_empty(net->xfrm.state_byspi));
	xfrm_hash_free(net->xfrm.state_byspi, sz);
	WARN_ON(!hlist_empty(net->xfrm.state_bysrc));
	xfrm_hash_free(net->xfrm.state_bysrc, sz);
	WARN_ON(!hlist_empty(net->xfrm.state_bydst));
	xfrm_hash_free(net->xfrm.state_bydst, sz);
	free_percpu(net->xfrm.state_cache_input);
}

#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
				     struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = x->security;
	u32 spi = ntohl(x->id.spi);

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch (x->props.family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
				 &x->props.saddr.a4, &x->id.daddr.a4);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6 dst=%pI6",
				 x->props.saddr.a6, x->id.daddr.a6);
		break;
	}

	audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
}

static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
				      struct audit_buffer *audit_buf)
{
	const struct iphdr *iph4;
	const struct ipv6hdr *iph6;

	switch (family) {
	case AF_INET:
		iph4 = ip_hdr(skb);
		audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
				 &iph4->saddr, &iph4->daddr);
		break;
	case AF_INET6:
		iph6 = ipv6_hdr(skb);
		audit_log_format(audit_buf,
				 " src=%pI6 dst=%pI6 flowlbl=0x%x%02x%02x",
				 &iph6->saddr, &iph6->daddr,
				 iph6->flow_lbl[0] & 0x0f,
				 iph6->flow_lbl[1],
				 iph6->flow_lbl[2]);
		break;
	}
}

void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SAD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	xfrm_audit_helper_sainfo(x, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_add);

void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SAD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
	xfrm_audit_helper_sainfo(x, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);

void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
				      struct sk_buff *skb)
{
	struct audit_buffer *audit_buf;
	u32 spi;

	audit_buf = xfrm_audit_start("SA-replay-overflow");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
	/* don't record the sequence number because it's inherent in this kind
	 * of audit message */
	spi = ntohl(x->id.spi);
	audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);

void xfrm_audit_state_replay(struct xfrm_state *x,
			     struct sk_buff *skb, __be32 net_seq)
{
	struct audit_buffer *audit_buf;
	u32 spi;

	audit_buf = xfrm_audit_start("SA-replayed-pkt");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
	spi = ntohl(x->id.spi);
	audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
			 spi, spi, ntohl(net_seq));
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_replay);

void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SA-notfound");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, family, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);

void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
			       __be32 net_spi, __be32 net_seq)
{
	struct audit_buffer *audit_buf;
	u32 spi;

	audit_buf = xfrm_audit_start("SA-notfound");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, family, audit_buf);
	spi = ntohl(net_spi);
	audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
			 spi, spi, ntohl(net_seq));
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);

void xfrm_audit_state_icvfail(struct xfrm_state *x,
			      struct sk_buff *skb, u8 proto)
{
	struct audit_buffer *audit_buf;
	__be32 net_spi;
	__be32 net_seq;

	audit_buf = xfrm_audit_start("SA-icv-failure");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
	if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
		u32 spi = ntohl(net_spi);
		audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
				 spi, spi, ntohl(net_seq));
	}
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
#endif /* CONFIG_AUDITSYSCALL */