1 /* SPDX-License-Identifier: GPL-2.0 */ 2 #ifndef _NET_XFRM_H 3 #define _NET_XFRM_H 4 5 #include <linux/compiler.h> 6 #include <linux/xfrm.h> 7 #include <linux/spinlock.h> 8 #include <linux/list.h> 9 #include <linux/skbuff.h> 10 #include <linux/socket.h> 11 #include <linux/pfkeyv2.h> 12 #include <linux/ipsec.h> 13 #include <linux/in6.h> 14 #include <linux/mutex.h> 15 #include <linux/audit.h> 16 #include <linux/slab.h> 17 #include <linux/refcount.h> 18 #include <linux/sockptr.h> 19 20 #include <net/sock.h> 21 #include <net/dst.h> 22 #include <net/ip.h> 23 #include <net/route.h> 24 #include <net/ipv6.h> 25 #include <net/ip6_fib.h> 26 #include <net/flow.h> 27 #include <net/gro_cells.h> 28 29 #include <linux/interrupt.h> 30 31 #ifdef CONFIG_XFRM_STATISTICS 32 #include <net/snmp.h> 33 #endif 34 35 #define XFRM_PROTO_ESP 50 36 #define XFRM_PROTO_AH 51 37 #define XFRM_PROTO_COMP 108 38 #define XFRM_PROTO_IPIP 4 39 #define XFRM_PROTO_IPV6 41 40 #define XFRM_PROTO_ROUTING IPPROTO_ROUTING 41 #define XFRM_PROTO_DSTOPTS IPPROTO_DSTOPTS 42 43 #define XFRM_ALIGN4(len) (((len) + 3) & ~3) 44 #define XFRM_ALIGN8(len) (((len) + 7) & ~7) 45 #define MODULE_ALIAS_XFRM_MODE(family, encap) \ 46 MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap)) 47 #define MODULE_ALIAS_XFRM_TYPE(family, proto) \ 48 MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto)) 49 #define MODULE_ALIAS_XFRM_OFFLOAD_TYPE(family, proto) \ 50 MODULE_ALIAS("xfrm-offload-" __stringify(family) "-" __stringify(proto)) 51 52 #ifdef CONFIG_XFRM_STATISTICS 53 #define XFRM_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.xfrm_statistics, field) 54 #define XFRM_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.xfrm_statistics, field, val) 55 #else 56 #define XFRM_INC_STATS(net, field) ((void)(net)) 57 #define XFRM_ADD_STATS(net, field, val) ((void)(net)) 58 #endif 59 60 61 /* Organization of SPD aka "XFRM rules" 62 ------------------------------------ 63 64 Basic objects: 65 - policy rule, struct xfrm_policy (=SPD entry) 66 - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle) 67 - instance of a transformer, struct xfrm_state (=SA) 68 - template to clone xfrm_state, struct xfrm_tmpl 69 70 SPD is organized as hash table (for policies that meet minimum address prefix 71 length setting, net->xfrm.policy_hthresh). Other policies are stored in 72 lists, sorted into rbtree ordered by destination and source address networks. 73 See net/xfrm/xfrm_policy.c for details. 74 75 (To be compatible with existing pfkeyv2 implementations, 76 many rules with priority of 0x7fffffff are allowed to exist and 77 such rules are ordered in an unpredictable way, thanks to bsd folks.) 78 79 If "action" is "block", then we prohibit the flow, otherwise: 80 if "xfrms_nr" is zero, the flow passes untransformed. Otherwise, 81 policy entry has list of up to XFRM_MAX_DEPTH transformations, 82 described by templates xfrm_tmpl. Each template is resolved 83 to a complete xfrm_state (see below) and we pack bundle of transformations 84 to a dst_entry returned to requester. 85 86 dst -. xfrm .-> xfrm_state #1 87 |---. child .-> dst -. xfrm .-> xfrm_state #2 88 |---. child .-> dst -. xfrm .-> xfrm_state #3 89 |---. child .-> NULL 90 91 92 Resolution of xrfm_tmpl 93 ----------------------- 94 Template contains: 95 1. ->mode Mode: transport or tunnel 96 2. ->id.proto Protocol: AH/ESP/IPCOMP 97 3. ->id.daddr Remote tunnel endpoint, ignored for transport mode. 98 Q: allow to resolve security gateway? 99 4. ->id.spi If not zero, static SPI. 100 5. ->saddr Local tunnel endpoint, ignored for transport mode. 101 6. ->algos List of allowed algos. Plain bitmask now. 102 Q: ealgos, aalgos, calgos. What a mess... 103 7. ->share Sharing mode. 104 Q: how to implement private sharing mode? To add struct sock* to 105 flow id? 106 107 Having this template we search through SAD searching for entries 108 with appropriate mode/proto/algo, permitted by selector. 109 If no appropriate entry found, it is requested from key manager. 110 111 PROBLEMS: 112 Q: How to find all the bundles referring to a physical path for 113 PMTU discovery? Seems, dst should contain list of all parents... 114 and enter to infinite locking hierarchy disaster. 115 No! It is easier, we will not search for them, let them find us. 116 We add genid to each dst plus pointer to genid of raw IP route, 117 pmtu disc will update pmtu on raw IP route and increase its genid. 118 dst_check() will see this for top level and trigger resyncing 119 metrics. Plus, it will be made via sk->sk_dst_cache. Solved. 120 */ 121 122 struct xfrm_state_walk { 123 struct list_head all; 124 u8 state; 125 u8 dying; 126 u8 proto; 127 u32 seq; 128 struct xfrm_address_filter *filter; 129 }; 130 131 enum { 132 XFRM_DEV_OFFLOAD_IN = 1, 133 XFRM_DEV_OFFLOAD_OUT, 134 XFRM_DEV_OFFLOAD_FWD, 135 }; 136 137 enum { 138 XFRM_DEV_OFFLOAD_UNSPECIFIED, 139 XFRM_DEV_OFFLOAD_CRYPTO, 140 XFRM_DEV_OFFLOAD_PACKET, 141 }; 142 143 enum { 144 XFRM_DEV_OFFLOAD_FLAG_ACQ = 1, 145 }; 146 147 struct xfrm_dev_offload { 148 struct net_device *dev; 149 netdevice_tracker dev_tracker; 150 struct net_device *real_dev; 151 unsigned long offload_handle; 152 u8 dir : 2; 153 u8 type : 2; 154 u8 flags : 2; 155 }; 156 157 struct xfrm_mode { 158 u8 encap; 159 u8 family; 160 u8 flags; 161 }; 162 163 /* Flags for xfrm_mode. */ 164 enum { 165 XFRM_MODE_FLAG_TUNNEL = 1, 166 }; 167 168 enum xfrm_replay_mode { 169 XFRM_REPLAY_MODE_LEGACY, 170 XFRM_REPLAY_MODE_BMP, 171 XFRM_REPLAY_MODE_ESN, 172 }; 173 174 /* Full description of state of transformer. */ 175 struct xfrm_state { 176 possible_net_t xs_net; 177 union { 178 struct hlist_node gclist; 179 struct hlist_node bydst; 180 }; 181 union { 182 struct hlist_node dev_gclist; 183 struct hlist_node bysrc; 184 }; 185 struct hlist_node byspi; 186 struct hlist_node byseq; 187 188 refcount_t refcnt; 189 spinlock_t lock; 190 191 struct xfrm_id id; 192 struct xfrm_selector sel; 193 struct xfrm_mark mark; 194 u32 if_id; 195 u32 tfcpad; 196 197 u32 genid; 198 199 /* Key manager bits */ 200 struct xfrm_state_walk km; 201 202 /* Parameters of this state. */ 203 struct { 204 u32 reqid; 205 u8 mode; 206 u8 replay_window; 207 u8 aalgo, ealgo, calgo; 208 u8 flags; 209 u16 family; 210 xfrm_address_t saddr; 211 int header_len; 212 int trailer_len; 213 u32 extra_flags; 214 struct xfrm_mark smark; 215 } props; 216 217 struct xfrm_lifetime_cfg lft; 218 219 /* Data for transformer */ 220 struct xfrm_algo_auth *aalg; 221 struct xfrm_algo *ealg; 222 struct xfrm_algo *calg; 223 struct xfrm_algo_aead *aead; 224 const char *geniv; 225 226 /* mapping change rate limiting */ 227 __be16 new_mapping_sport; 228 u32 new_mapping; /* seconds */ 229 u32 mapping_maxage; /* seconds for input SA */ 230 231 /* Data for encapsulator */ 232 struct xfrm_encap_tmpl *encap; 233 struct sock __rcu *encap_sk; 234 235 /* NAT keepalive */ 236 u32 nat_keepalive_interval; /* seconds */ 237 time64_t nat_keepalive_expiration; 238 239 /* Data for care-of address */ 240 xfrm_address_t *coaddr; 241 242 /* IPComp needs an IPIP tunnel for handling uncompressed packets */ 243 struct xfrm_state *tunnel; 244 245 /* If a tunnel, number of users + 1 */ 246 atomic_t tunnel_users; 247 248 /* State for replay detection */ 249 struct xfrm_replay_state replay; 250 struct xfrm_replay_state_esn *replay_esn; 251 252 /* Replay detection state at the time we sent the last notification */ 253 struct xfrm_replay_state preplay; 254 struct xfrm_replay_state_esn *preplay_esn; 255 256 /* replay detection mode */ 257 enum xfrm_replay_mode repl_mode; 258 /* internal flag that only holds state for delayed aevent at the 259 * moment 260 */ 261 u32 xflags; 262 263 /* Replay detection notification settings */ 264 u32 replay_maxage; 265 u32 replay_maxdiff; 266 267 /* Replay detection notification timer */ 268 struct timer_list rtimer; 269 270 /* Statistics */ 271 struct xfrm_stats stats; 272 273 struct xfrm_lifetime_cur curlft; 274 struct hrtimer mtimer; 275 276 struct xfrm_dev_offload xso; 277 278 /* used to fix curlft->add_time when changing date */ 279 long saved_tmo; 280 281 /* Last used time */ 282 time64_t lastused; 283 284 struct page_frag xfrag; 285 286 /* Reference to data common to all the instances of this 287 * transformer. */ 288 const struct xfrm_type *type; 289 struct xfrm_mode inner_mode; 290 struct xfrm_mode inner_mode_iaf; 291 struct xfrm_mode outer_mode; 292 293 const struct xfrm_type_offload *type_offload; 294 295 /* Security context */ 296 struct xfrm_sec_ctx *security; 297 298 /* Private data of this transformer, format is opaque, 299 * interpreted by xfrm_type methods. */ 300 void *data; 301 u8 dir; 302 }; 303 304 static inline struct net *xs_net(struct xfrm_state *x) 305 { 306 return read_pnet(&x->xs_net); 307 } 308 309 /* xflags - make enum if more show up */ 310 #define XFRM_TIME_DEFER 1 311 #define XFRM_SOFT_EXPIRE 2 312 313 enum { 314 XFRM_STATE_VOID, 315 XFRM_STATE_ACQ, 316 XFRM_STATE_VALID, 317 XFRM_STATE_ERROR, 318 XFRM_STATE_EXPIRED, 319 XFRM_STATE_DEAD 320 }; 321 322 /* callback structure passed from either netlink or pfkey */ 323 struct km_event { 324 union { 325 u32 hard; 326 u32 proto; 327 u32 byid; 328 u32 aevent; 329 u32 type; 330 } data; 331 332 u32 seq; 333 u32 portid; 334 u32 event; 335 struct net *net; 336 }; 337 338 struct xfrm_if_decode_session_result { 339 struct net *net; 340 u32 if_id; 341 }; 342 343 struct xfrm_if_cb { 344 bool (*decode_session)(struct sk_buff *skb, 345 unsigned short family, 346 struct xfrm_if_decode_session_result *res); 347 }; 348 349 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb); 350 void xfrm_if_unregister_cb(void); 351 352 struct net_device; 353 struct xfrm_type; 354 struct xfrm_dst; 355 struct xfrm_policy_afinfo { 356 struct dst_ops *dst_ops; 357 struct dst_entry *(*dst_lookup)(struct net *net, 358 int tos, int oif, 359 const xfrm_address_t *saddr, 360 const xfrm_address_t *daddr, 361 u32 mark); 362 int (*get_saddr)(struct net *net, int oif, 363 xfrm_address_t *saddr, 364 xfrm_address_t *daddr, 365 u32 mark); 366 int (*fill_dst)(struct xfrm_dst *xdst, 367 struct net_device *dev, 368 const struct flowi *fl); 369 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig); 370 }; 371 372 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family); 373 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo); 374 void km_policy_notify(struct xfrm_policy *xp, int dir, 375 const struct km_event *c); 376 void km_state_notify(struct xfrm_state *x, const struct km_event *c); 377 378 struct xfrm_tmpl; 379 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, 380 struct xfrm_policy *pol); 381 void km_state_expired(struct xfrm_state *x, int hard, u32 portid); 382 int __xfrm_state_delete(struct xfrm_state *x); 383 384 struct xfrm_state_afinfo { 385 u8 family; 386 u8 proto; 387 388 const struct xfrm_type_offload *type_offload_esp; 389 390 const struct xfrm_type *type_esp; 391 const struct xfrm_type *type_ipip; 392 const struct xfrm_type *type_ipip6; 393 const struct xfrm_type *type_comp; 394 const struct xfrm_type *type_ah; 395 const struct xfrm_type *type_routing; 396 const struct xfrm_type *type_dstopts; 397 398 int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb); 399 int (*transport_finish)(struct sk_buff *skb, 400 int async); 401 void (*local_error)(struct sk_buff *skb, u32 mtu); 402 }; 403 404 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo); 405 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo); 406 struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); 407 struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family); 408 409 struct xfrm_input_afinfo { 410 u8 family; 411 bool is_ipip; 412 int (*callback)(struct sk_buff *skb, u8 protocol, 413 int err); 414 }; 415 416 int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo); 417 int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo); 418 419 void xfrm_flush_gc(void); 420 void xfrm_state_delete_tunnel(struct xfrm_state *x); 421 422 struct xfrm_type { 423 struct module *owner; 424 u8 proto; 425 u8 flags; 426 #define XFRM_TYPE_NON_FRAGMENT 1 427 #define XFRM_TYPE_REPLAY_PROT 2 428 #define XFRM_TYPE_LOCAL_COADDR 4 429 #define XFRM_TYPE_REMOTE_COADDR 8 430 431 int (*init_state)(struct xfrm_state *x, 432 struct netlink_ext_ack *extack); 433 void (*destructor)(struct xfrm_state *); 434 int (*input)(struct xfrm_state *, struct sk_buff *skb); 435 int (*output)(struct xfrm_state *, struct sk_buff *pskb); 436 int (*reject)(struct xfrm_state *, struct sk_buff *, 437 const struct flowi *); 438 }; 439 440 int xfrm_register_type(const struct xfrm_type *type, unsigned short family); 441 void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family); 442 443 struct xfrm_type_offload { 444 struct module *owner; 445 u8 proto; 446 void (*encap)(struct xfrm_state *, struct sk_buff *pskb); 447 int (*input_tail)(struct xfrm_state *x, struct sk_buff *skb); 448 int (*xmit)(struct xfrm_state *, struct sk_buff *pskb, netdev_features_t features); 449 }; 450 451 int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family); 452 void xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family); 453 454 static inline int xfrm_af2proto(unsigned int family) 455 { 456 switch(family) { 457 case AF_INET: 458 return IPPROTO_IPIP; 459 case AF_INET6: 460 return IPPROTO_IPV6; 461 default: 462 return 0; 463 } 464 } 465 466 static inline const struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto) 467 { 468 if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) || 469 (ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6)) 470 return &x->inner_mode; 471 else 472 return &x->inner_mode_iaf; 473 } 474 475 struct xfrm_tmpl { 476 /* id in template is interpreted as: 477 * daddr - destination of tunnel, may be zero for transport mode. 478 * spi - zero to acquire spi. Not zero if spi is static, then 479 * daddr must be fixed too. 480 * proto - AH/ESP/IPCOMP 481 */ 482 struct xfrm_id id; 483 484 /* Source address of tunnel. Ignored, if it is not a tunnel. */ 485 xfrm_address_t saddr; 486 487 unsigned short encap_family; 488 489 u32 reqid; 490 491 /* Mode: transport, tunnel etc. */ 492 u8 mode; 493 494 /* Sharing mode: unique, this session only, this user only etc. */ 495 u8 share; 496 497 /* May skip this transfomration if no SA is found */ 498 u8 optional; 499 500 /* Skip aalgos/ealgos/calgos checks. */ 501 u8 allalgs; 502 503 /* Bit mask of algos allowed for acquisition */ 504 u32 aalgos; 505 u32 ealgos; 506 u32 calgos; 507 }; 508 509 #define XFRM_MAX_DEPTH 6 510 #define XFRM_MAX_OFFLOAD_DEPTH 1 511 512 struct xfrm_policy_walk_entry { 513 struct list_head all; 514 u8 dead; 515 }; 516 517 struct xfrm_policy_walk { 518 struct xfrm_policy_walk_entry walk; 519 u8 type; 520 u32 seq; 521 }; 522 523 struct xfrm_policy_queue { 524 struct sk_buff_head hold_queue; 525 struct timer_list hold_timer; 526 unsigned long timeout; 527 }; 528 529 /** 530 * struct xfrm_policy - xfrm policy 531 * @xp_net: network namespace the policy lives in 532 * @bydst: hlist node for SPD hash table or rbtree list 533 * @byidx: hlist node for index hash table 534 * @lock: serialize changes to policy structure members 535 * @refcnt: reference count, freed once it reaches 0 536 * @pos: kernel internal tie-breaker to determine age of policy 537 * @timer: timer 538 * @genid: generation, used to invalidate old policies 539 * @priority: priority, set by userspace 540 * @index: policy index (autogenerated) 541 * @if_id: virtual xfrm interface id 542 * @mark: packet mark 543 * @selector: selector 544 * @lft: liftime configuration data 545 * @curlft: liftime state 546 * @walk: list head on pernet policy list 547 * @polq: queue to hold packets while aqcuire operaion in progress 548 * @bydst_reinsert: policy tree node needs to be merged 549 * @type: XFRM_POLICY_TYPE_MAIN or _SUB 550 * @action: XFRM_POLICY_ALLOW or _BLOCK 551 * @flags: XFRM_POLICY_LOCALOK, XFRM_POLICY_ICMP 552 * @xfrm_nr: number of used templates in @xfrm_vec 553 * @family: protocol family 554 * @security: SELinux security label 555 * @xfrm_vec: array of templates to resolve state 556 * @rcu: rcu head, used to defer memory release 557 * @xdo: hardware offload state 558 */ 559 struct xfrm_policy { 560 possible_net_t xp_net; 561 struct hlist_node bydst; 562 struct hlist_node byidx; 563 564 /* This lock only affects elements except for entry. */ 565 rwlock_t lock; 566 refcount_t refcnt; 567 u32 pos; 568 struct timer_list timer; 569 570 atomic_t genid; 571 u32 priority; 572 u32 index; 573 u32 if_id; 574 struct xfrm_mark mark; 575 struct xfrm_selector selector; 576 struct xfrm_lifetime_cfg lft; 577 struct xfrm_lifetime_cur curlft; 578 struct xfrm_policy_walk_entry walk; 579 struct xfrm_policy_queue polq; 580 bool bydst_reinsert; 581 u8 type; 582 u8 action; 583 u8 flags; 584 u8 xfrm_nr; 585 u16 family; 586 struct xfrm_sec_ctx *security; 587 struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH]; 588 struct rcu_head rcu; 589 590 struct xfrm_dev_offload xdo; 591 }; 592 593 static inline struct net *xp_net(const struct xfrm_policy *xp) 594 { 595 return read_pnet(&xp->xp_net); 596 } 597 598 struct xfrm_kmaddress { 599 xfrm_address_t local; 600 xfrm_address_t remote; 601 u32 reserved; 602 u16 family; 603 }; 604 605 struct xfrm_migrate { 606 xfrm_address_t old_daddr; 607 xfrm_address_t old_saddr; 608 xfrm_address_t new_daddr; 609 xfrm_address_t new_saddr; 610 u8 proto; 611 u8 mode; 612 u16 reserved; 613 u32 reqid; 614 u16 old_family; 615 u16 new_family; 616 }; 617 618 #define XFRM_KM_TIMEOUT 30 619 /* what happened */ 620 #define XFRM_REPLAY_UPDATE XFRM_AE_CR 621 #define XFRM_REPLAY_TIMEOUT XFRM_AE_CE 622 623 /* default aevent timeout in units of 100ms */ 624 #define XFRM_AE_ETIME 10 625 /* Async Event timer multiplier */ 626 #define XFRM_AE_ETH_M 10 627 /* default seq threshold size */ 628 #define XFRM_AE_SEQT_SIZE 2 629 630 struct xfrm_mgr { 631 struct list_head list; 632 int (*notify)(struct xfrm_state *x, const struct km_event *c); 633 int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp); 634 struct xfrm_policy *(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir); 635 int (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport); 636 int (*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c); 637 int (*report)(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr); 638 int (*migrate)(const struct xfrm_selector *sel, 639 u8 dir, u8 type, 640 const struct xfrm_migrate *m, 641 int num_bundles, 642 const struct xfrm_kmaddress *k, 643 const struct xfrm_encap_tmpl *encap); 644 bool (*is_alive)(const struct km_event *c); 645 }; 646 647 void xfrm_register_km(struct xfrm_mgr *km); 648 void xfrm_unregister_km(struct xfrm_mgr *km); 649 650 struct xfrm_tunnel_skb_cb { 651 union { 652 struct inet_skb_parm h4; 653 struct inet6_skb_parm h6; 654 } header; 655 656 union { 657 struct ip_tunnel *ip4; 658 struct ip6_tnl *ip6; 659 } tunnel; 660 }; 661 662 #define XFRM_TUNNEL_SKB_CB(__skb) ((struct xfrm_tunnel_skb_cb *)&((__skb)->cb[0])) 663 664 /* 665 * This structure is used for the duration where packets are being 666 * transformed by IPsec. As soon as the packet leaves IPsec the 667 * area beyond the generic IP part may be overwritten. 668 */ 669 struct xfrm_skb_cb { 670 struct xfrm_tunnel_skb_cb header; 671 672 /* Sequence number for replay protection. */ 673 union { 674 struct { 675 __u32 low; 676 __u32 hi; 677 } output; 678 struct { 679 __be32 low; 680 __be32 hi; 681 } input; 682 } seq; 683 }; 684 685 #define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0])) 686 687 /* 688 * This structure is used by the afinfo prepare_input/prepare_output functions 689 * to transmit header information to the mode input/output functions. 690 */ 691 struct xfrm_mode_skb_cb { 692 struct xfrm_tunnel_skb_cb header; 693 694 /* Copied from header for IPv4, always set to zero and DF for IPv6. */ 695 __be16 id; 696 __be16 frag_off; 697 698 /* IP header length (excluding options or extension headers). */ 699 u8 ihl; 700 701 /* TOS for IPv4, class for IPv6. */ 702 u8 tos; 703 704 /* TTL for IPv4, hop limitfor IPv6. */ 705 u8 ttl; 706 707 /* Protocol for IPv4, NH for IPv6. */ 708 u8 protocol; 709 710 /* Option length for IPv4, zero for IPv6. */ 711 u8 optlen; 712 713 /* Used by IPv6 only, zero for IPv4. */ 714 u8 flow_lbl[3]; 715 }; 716 717 #define XFRM_MODE_SKB_CB(__skb) ((struct xfrm_mode_skb_cb *)&((__skb)->cb[0])) 718 719 /* 720 * This structure is used by the input processing to locate the SPI and 721 * related information. 722 */ 723 struct xfrm_spi_skb_cb { 724 struct xfrm_tunnel_skb_cb header; 725 726 unsigned int daddroff; 727 unsigned int family; 728 __be32 seq; 729 }; 730 731 #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0])) 732 733 #ifdef CONFIG_AUDITSYSCALL 734 static inline struct audit_buffer *xfrm_audit_start(const char *op) 735 { 736 struct audit_buffer *audit_buf = NULL; 737 738 if (audit_enabled == AUDIT_OFF) 739 return NULL; 740 audit_buf = audit_log_start(audit_context(), GFP_ATOMIC, 741 AUDIT_MAC_IPSEC_EVENT); 742 if (audit_buf == NULL) 743 return NULL; 744 audit_log_format(audit_buf, "op=%s", op); 745 return audit_buf; 746 } 747 748 static inline void xfrm_audit_helper_usrinfo(bool task_valid, 749 struct audit_buffer *audit_buf) 750 { 751 const unsigned int auid = from_kuid(&init_user_ns, task_valid ? 752 audit_get_loginuid(current) : 753 INVALID_UID); 754 const unsigned int ses = task_valid ? audit_get_sessionid(current) : 755 AUDIT_SID_UNSET; 756 757 audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses); 758 audit_log_task_context(audit_buf); 759 } 760 761 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid); 762 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, 763 bool task_valid); 764 void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid); 765 void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid); 766 void xfrm_audit_state_replay_overflow(struct xfrm_state *x, 767 struct sk_buff *skb); 768 void xfrm_audit_state_replay(struct xfrm_state *x, struct sk_buff *skb, 769 __be32 net_seq); 770 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family); 771 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, __be32 net_spi, 772 __be32 net_seq); 773 void xfrm_audit_state_icvfail(struct xfrm_state *x, struct sk_buff *skb, 774 u8 proto); 775 #else 776 777 static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, 778 bool task_valid) 779 { 780 } 781 782 static inline void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result, 783 bool task_valid) 784 { 785 } 786 787 static inline void xfrm_audit_state_add(struct xfrm_state *x, int result, 788 bool task_valid) 789 { 790 } 791 792 static inline void xfrm_audit_state_delete(struct xfrm_state *x, int result, 793 bool task_valid) 794 { 795 } 796 797 static inline void xfrm_audit_state_replay_overflow(struct xfrm_state *x, 798 struct sk_buff *skb) 799 { 800 } 801 802 static inline void xfrm_audit_state_replay(struct xfrm_state *x, 803 struct sk_buff *skb, __be32 net_seq) 804 { 805 } 806 807 static inline void xfrm_audit_state_notfound_simple(struct sk_buff *skb, 808 u16 family) 809 { 810 } 811 812 static inline void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, 813 __be32 net_spi, __be32 net_seq) 814 { 815 } 816 817 static inline void xfrm_audit_state_icvfail(struct xfrm_state *x, 818 struct sk_buff *skb, u8 proto) 819 { 820 } 821 #endif /* CONFIG_AUDITSYSCALL */ 822 823 static inline void xfrm_pol_hold(struct xfrm_policy *policy) 824 { 825 if (likely(policy != NULL)) 826 refcount_inc(&policy->refcnt); 827 } 828 829 void xfrm_policy_destroy(struct xfrm_policy *policy); 830 831 static inline void xfrm_pol_put(struct xfrm_policy *policy) 832 { 833 if (refcount_dec_and_test(&policy->refcnt)) 834 xfrm_policy_destroy(policy); 835 } 836 837 static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols) 838 { 839 int i; 840 for (i = npols - 1; i >= 0; --i) 841 xfrm_pol_put(pols[i]); 842 } 843 844 void __xfrm_state_destroy(struct xfrm_state *, bool); 845 846 static inline void __xfrm_state_put(struct xfrm_state *x) 847 { 848 refcount_dec(&x->refcnt); 849 } 850 851 static inline void xfrm_state_put(struct xfrm_state *x) 852 { 853 if (refcount_dec_and_test(&x->refcnt)) 854 __xfrm_state_destroy(x, false); 855 } 856 857 static inline void xfrm_state_put_sync(struct xfrm_state *x) 858 { 859 if (refcount_dec_and_test(&x->refcnt)) 860 __xfrm_state_destroy(x, true); 861 } 862 863 static inline void xfrm_state_hold(struct xfrm_state *x) 864 { 865 refcount_inc(&x->refcnt); 866 } 867 868 static inline bool addr_match(const void *token1, const void *token2, 869 unsigned int prefixlen) 870 { 871 const __be32 *a1 = token1; 872 const __be32 *a2 = token2; 873 unsigned int pdw; 874 unsigned int pbi; 875 876 pdw = prefixlen >> 5; /* num of whole u32 in prefix */ 877 pbi = prefixlen & 0x1f; /* num of bits in incomplete u32 in prefix */ 878 879 if (pdw) 880 if (memcmp(a1, a2, pdw << 2)) 881 return false; 882 883 if (pbi) { 884 __be32 mask; 885 886 mask = htonl((0xffffffff) << (32 - pbi)); 887 888 if ((a1[pdw] ^ a2[pdw]) & mask) 889 return false; 890 } 891 892 return true; 893 } 894 895 static inline bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen) 896 { 897 /* C99 6.5.7 (3): u32 << 32 is undefined behaviour */ 898 if (sizeof(long) == 4 && prefixlen == 0) 899 return true; 900 return !((a1 ^ a2) & htonl(~0UL << (32 - prefixlen))); 901 } 902 903 static __inline__ 904 __be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli) 905 { 906 __be16 port; 907 switch(fl->flowi_proto) { 908 case IPPROTO_TCP: 909 case IPPROTO_UDP: 910 case IPPROTO_UDPLITE: 911 case IPPROTO_SCTP: 912 port = uli->ports.sport; 913 break; 914 case IPPROTO_ICMP: 915 case IPPROTO_ICMPV6: 916 port = htons(uli->icmpt.type); 917 break; 918 case IPPROTO_MH: 919 port = htons(uli->mht.type); 920 break; 921 case IPPROTO_GRE: 922 port = htons(ntohl(uli->gre_key) >> 16); 923 break; 924 default: 925 port = 0; /*XXX*/ 926 } 927 return port; 928 } 929 930 static __inline__ 931 __be16 xfrm_flowi_dport(const struct flowi *fl, const union flowi_uli *uli) 932 { 933 __be16 port; 934 switch(fl->flowi_proto) { 935 case IPPROTO_TCP: 936 case IPPROTO_UDP: 937 case IPPROTO_UDPLITE: 938 case IPPROTO_SCTP: 939 port = uli->ports.dport; 940 break; 941 case IPPROTO_ICMP: 942 case IPPROTO_ICMPV6: 943 port = htons(uli->icmpt.code); 944 break; 945 case IPPROTO_GRE: 946 port = htons(ntohl(uli->gre_key) & 0xffff); 947 break; 948 default: 949 port = 0; /*XXX*/ 950 } 951 return port; 952 } 953 954 bool xfrm_selector_match(const struct xfrm_selector *sel, 955 const struct flowi *fl, unsigned short family); 956 957 #ifdef CONFIG_SECURITY_NETWORK_XFRM 958 /* If neither has a context --> match 959 * Otherwise, both must have a context and the sids, doi, alg must match 960 */ 961 static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2) 962 { 963 return ((!s1 && !s2) || 964 (s1 && s2 && 965 (s1->ctx_sid == s2->ctx_sid) && 966 (s1->ctx_doi == s2->ctx_doi) && 967 (s1->ctx_alg == s2->ctx_alg))); 968 } 969 #else 970 static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2) 971 { 972 return true; 973 } 974 #endif 975 976 /* A struct encoding bundle of transformations to apply to some set of flow. 977 * 978 * xdst->child points to the next element of bundle. 979 * dst->xfrm points to an instanse of transformer. 980 * 981 * Due to unfortunate limitations of current routing cache, which we 982 * have no time to fix, it mirrors struct rtable and bound to the same 983 * routing key, including saddr,daddr. However, we can have many of 984 * bundles differing by session id. All the bundles grow from a parent 985 * policy rule. 986 */ 987 struct xfrm_dst { 988 union { 989 struct dst_entry dst; 990 struct rtable rt; 991 struct rt6_info rt6; 992 } u; 993 struct dst_entry *route; 994 struct dst_entry *child; 995 struct dst_entry *path; 996 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 997 int num_pols, num_xfrms; 998 u32 xfrm_genid; 999 u32 policy_genid; 1000 u32 route_mtu_cached; 1001 u32 child_mtu_cached; 1002 u32 route_cookie; 1003 u32 path_cookie; 1004 }; 1005 1006 static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst) 1007 { 1008 #ifdef CONFIG_XFRM 1009 if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) { 1010 const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst; 1011 1012 return xdst->path; 1013 } 1014 #endif 1015 return (struct dst_entry *) dst; 1016 } 1017 1018 static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst) 1019 { 1020 #ifdef CONFIG_XFRM 1021 if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) { 1022 struct xfrm_dst *xdst = (struct xfrm_dst *) dst; 1023 return xdst->child; 1024 } 1025 #endif 1026 return NULL; 1027 } 1028 1029 #ifdef CONFIG_XFRM 1030 static inline void xfrm_dst_set_child(struct xfrm_dst *xdst, struct dst_entry *child) 1031 { 1032 xdst->child = child; 1033 } 1034 1035 static inline void xfrm_dst_destroy(struct xfrm_dst *xdst) 1036 { 1037 xfrm_pols_put(xdst->pols, xdst->num_pols); 1038 dst_release(xdst->route); 1039 if (likely(xdst->u.dst.xfrm)) 1040 xfrm_state_put(xdst->u.dst.xfrm); 1041 } 1042 #endif 1043 1044 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev); 1045 1046 struct xfrm_if_parms { 1047 int link; /* ifindex of underlying L2 interface */ 1048 u32 if_id; /* interface identifier */ 1049 bool collect_md; 1050 }; 1051 1052 struct xfrm_if { 1053 struct xfrm_if __rcu *next; /* next interface in list */ 1054 struct net_device *dev; /* virtual device associated with interface */ 1055 struct net *net; /* netns for packet i/o */ 1056 struct xfrm_if_parms p; /* interface parms */ 1057 1058 struct gro_cells gro_cells; 1059 }; 1060 1061 struct xfrm_offload { 1062 /* Output sequence number for replay protection on offloading. */ 1063 struct { 1064 __u32 low; 1065 __u32 hi; 1066 } seq; 1067 1068 __u32 flags; 1069 #define SA_DELETE_REQ 1 1070 #define CRYPTO_DONE 2 1071 #define CRYPTO_NEXT_DONE 4 1072 #define CRYPTO_FALLBACK 8 1073 #define XFRM_GSO_SEGMENT 16 1074 #define XFRM_GRO 32 1075 /* 64 is free */ 1076 #define XFRM_DEV_RESUME 128 1077 #define XFRM_XMIT 256 1078 1079 __u32 status; 1080 #define CRYPTO_SUCCESS 1 1081 #define CRYPTO_GENERIC_ERROR 2 1082 #define CRYPTO_TRANSPORT_AH_AUTH_FAILED 4 1083 #define CRYPTO_TRANSPORT_ESP_AUTH_FAILED 8 1084 #define CRYPTO_TUNNEL_AH_AUTH_FAILED 16 1085 #define CRYPTO_TUNNEL_ESP_AUTH_FAILED 32 1086 #define CRYPTO_INVALID_PACKET_SYNTAX 64 1087 #define CRYPTO_INVALID_PROTOCOL 128 1088 1089 /* Used to keep whole l2 header for transport mode GRO */ 1090 __u32 orig_mac_len; 1091 1092 __u8 proto; 1093 __u8 inner_ipproto; 1094 }; 1095 1096 struct sec_path { 1097 int len; 1098 int olen; 1099 int verified_cnt; 1100 1101 struct xfrm_state *xvec[XFRM_MAX_DEPTH]; 1102 struct xfrm_offload ovec[XFRM_MAX_OFFLOAD_DEPTH]; 1103 }; 1104 1105 struct sec_path *secpath_set(struct sk_buff *skb); 1106 1107 static inline void 1108 secpath_reset(struct sk_buff *skb) 1109 { 1110 #ifdef CONFIG_XFRM 1111 skb_ext_del(skb, SKB_EXT_SEC_PATH); 1112 #endif 1113 } 1114 1115 static inline int 1116 xfrm_addr_any(const xfrm_address_t *addr, unsigned short family) 1117 { 1118 switch (family) { 1119 case AF_INET: 1120 return addr->a4 == 0; 1121 case AF_INET6: 1122 return ipv6_addr_any(&addr->in6); 1123 } 1124 return 0; 1125 } 1126 1127 static inline int 1128 __xfrm4_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x) 1129 { 1130 return (tmpl->saddr.a4 && 1131 tmpl->saddr.a4 != x->props.saddr.a4); 1132 } 1133 1134 static inline int 1135 __xfrm6_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x) 1136 { 1137 return (!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) && 1138 !ipv6_addr_equal((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr)); 1139 } 1140 1141 static inline int 1142 xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, unsigned short family) 1143 { 1144 switch (family) { 1145 case AF_INET: 1146 return __xfrm4_state_addr_cmp(tmpl, x); 1147 case AF_INET6: 1148 return __xfrm6_state_addr_cmp(tmpl, x); 1149 } 1150 return !0; 1151 } 1152 1153 #ifdef CONFIG_XFRM 1154 static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb) 1155 { 1156 struct sec_path *sp = skb_sec_path(skb); 1157 1158 return sp->xvec[sp->len - 1]; 1159 } 1160 #endif 1161 1162 static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb) 1163 { 1164 #ifdef CONFIG_XFRM 1165 struct sec_path *sp = skb_sec_path(skb); 1166 1167 if (!sp || !sp->olen || sp->len != sp->olen) 1168 return NULL; 1169 1170 return &sp->ovec[sp->olen - 1]; 1171 #else 1172 return NULL; 1173 #endif 1174 } 1175 1176 #ifdef CONFIG_XFRM 1177 int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb, 1178 unsigned short family); 1179 1180 static inline bool __xfrm_check_nopolicy(struct net *net, struct sk_buff *skb, 1181 int dir) 1182 { 1183 if (!net->xfrm.policy_count[dir] && !secpath_exists(skb)) 1184 return net->xfrm.policy_default[dir] == XFRM_USERPOLICY_ACCEPT; 1185 1186 return false; 1187 } 1188 1189 static inline bool __xfrm_check_dev_nopolicy(struct sk_buff *skb, 1190 int dir, unsigned short family) 1191 { 1192 if (dir != XFRM_POLICY_OUT && family == AF_INET) { 1193 /* same dst may be used for traffic originating from 1194 * devices with different policy settings. 1195 */ 1196 return IPCB(skb)->flags & IPSKB_NOPOLICY; 1197 } 1198 return skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY); 1199 } 1200 1201 static inline int __xfrm_policy_check2(struct sock *sk, int dir, 1202 struct sk_buff *skb, 1203 unsigned int family, int reverse) 1204 { 1205 struct net *net = dev_net(skb->dev); 1206 int ndir = dir | (reverse ? XFRM_POLICY_MASK + 1 : 0); 1207 struct xfrm_offload *xo = xfrm_offload(skb); 1208 struct xfrm_state *x; 1209 1210 if (sk && sk->sk_policy[XFRM_POLICY_IN]) 1211 return __xfrm_policy_check(sk, ndir, skb, family); 1212 1213 if (xo) { 1214 x = xfrm_input_state(skb); 1215 if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) 1216 return (xo->flags & CRYPTO_DONE) && 1217 (xo->status & CRYPTO_SUCCESS); 1218 } 1219 1220 return __xfrm_check_nopolicy(net, skb, dir) || 1221 __xfrm_check_dev_nopolicy(skb, dir, family) || 1222 __xfrm_policy_check(sk, ndir, skb, family); 1223 } 1224 1225 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family) 1226 { 1227 return __xfrm_policy_check2(sk, dir, skb, family, 0); 1228 } 1229 1230 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb) 1231 { 1232 return xfrm_policy_check(sk, dir, skb, AF_INET); 1233 } 1234 1235 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb) 1236 { 1237 return xfrm_policy_check(sk, dir, skb, AF_INET6); 1238 } 1239 1240 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir, 1241 struct sk_buff *skb) 1242 { 1243 return __xfrm_policy_check2(sk, dir, skb, AF_INET, 1); 1244 } 1245 1246 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir, 1247 struct sk_buff *skb) 1248 { 1249 return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1); 1250 } 1251 1252 int __xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl, 1253 unsigned int family, int reverse); 1254 1255 static inline int xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl, 1256 unsigned int family) 1257 { 1258 return __xfrm_decode_session(net, skb, fl, family, 0); 1259 } 1260 1261 static inline int xfrm_decode_session_reverse(struct net *net, struct sk_buff *skb, 1262 struct flowi *fl, 1263 unsigned int family) 1264 { 1265 return __xfrm_decode_session(net, skb, fl, family, 1); 1266 } 1267 1268 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family); 1269 1270 static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family) 1271 { 1272 struct net *net = dev_net(skb->dev); 1273 1274 if (!net->xfrm.policy_count[XFRM_POLICY_OUT] && 1275 net->xfrm.policy_default[XFRM_POLICY_OUT] == XFRM_USERPOLICY_ACCEPT) 1276 return true; 1277 1278 return (skb_dst(skb)->flags & DST_NOXFRM) || 1279 __xfrm_route_forward(skb, family); 1280 } 1281 1282 static inline int xfrm4_route_forward(struct sk_buff *skb) 1283 { 1284 return xfrm_route_forward(skb, AF_INET); 1285 } 1286 1287 static inline int xfrm6_route_forward(struct sk_buff *skb) 1288 { 1289 return xfrm_route_forward(skb, AF_INET6); 1290 } 1291 1292 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk); 1293 1294 static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) 1295 { 1296 if (!sk_fullsock(osk)) 1297 return 0; 1298 sk->sk_policy[0] = NULL; 1299 sk->sk_policy[1] = NULL; 1300 if (unlikely(osk->sk_policy[0] || osk->sk_policy[1])) 1301 return __xfrm_sk_clone_policy(sk, osk); 1302 return 0; 1303 } 1304 1305 int xfrm_policy_delete(struct xfrm_policy *pol, int dir); 1306 1307 static inline void xfrm_sk_free_policy(struct sock *sk) 1308 { 1309 struct xfrm_policy *pol; 1310 1311 pol = rcu_dereference_protected(sk->sk_policy[0], 1); 1312 if (unlikely(pol != NULL)) { 1313 xfrm_policy_delete(pol, XFRM_POLICY_MAX); 1314 sk->sk_policy[0] = NULL; 1315 } 1316 pol = rcu_dereference_protected(sk->sk_policy[1], 1); 1317 if (unlikely(pol != NULL)) { 1318 xfrm_policy_delete(pol, XFRM_POLICY_MAX+1); 1319 sk->sk_policy[1] = NULL; 1320 } 1321 } 1322 1323 #else 1324 1325 static inline void xfrm_sk_free_policy(struct sock *sk) {} 1326 static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) { return 0; } 1327 static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; } 1328 static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; } 1329 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb) 1330 { 1331 return 1; 1332 } 1333 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb) 1334 { 1335 return 1; 1336 } 1337 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family) 1338 { 1339 return 1; 1340 } 1341 static inline int xfrm_decode_session_reverse(struct net *net, struct sk_buff *skb, 1342 struct flowi *fl, 1343 unsigned int family) 1344 { 1345 return -ENOSYS; 1346 } 1347 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir, 1348 struct sk_buff *skb) 1349 { 1350 return 1; 1351 } 1352 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir, 1353 struct sk_buff *skb) 1354 { 1355 return 1; 1356 } 1357 #endif 1358 1359 static __inline__ 1360 xfrm_address_t *xfrm_flowi_daddr(const struct flowi *fl, unsigned short family) 1361 { 1362 switch (family){ 1363 case AF_INET: 1364 return (xfrm_address_t *)&fl->u.ip4.daddr; 1365 case AF_INET6: 1366 return (xfrm_address_t *)&fl->u.ip6.daddr; 1367 } 1368 return NULL; 1369 } 1370 1371 static __inline__ 1372 xfrm_address_t *xfrm_flowi_saddr(const struct flowi *fl, unsigned short family) 1373 { 1374 switch (family){ 1375 case AF_INET: 1376 return (xfrm_address_t *)&fl->u.ip4.saddr; 1377 case AF_INET6: 1378 return (xfrm_address_t *)&fl->u.ip6.saddr; 1379 } 1380 return NULL; 1381 } 1382 1383 static __inline__ 1384 void xfrm_flowi_addr_get(const struct flowi *fl, 1385 xfrm_address_t *saddr, xfrm_address_t *daddr, 1386 unsigned short family) 1387 { 1388 switch(family) { 1389 case AF_INET: 1390 memcpy(&saddr->a4, &fl->u.ip4.saddr, sizeof(saddr->a4)); 1391 memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4)); 1392 break; 1393 case AF_INET6: 1394 saddr->in6 = fl->u.ip6.saddr; 1395 daddr->in6 = fl->u.ip6.daddr; 1396 break; 1397 } 1398 } 1399 1400 static __inline__ int 1401 __xfrm4_state_addr_check(const struct xfrm_state *x, 1402 const xfrm_address_t *daddr, const xfrm_address_t *saddr) 1403 { 1404 if (daddr->a4 == x->id.daddr.a4 && 1405 (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4)) 1406 return 1; 1407 return 0; 1408 } 1409 1410 static __inline__ int 1411 __xfrm6_state_addr_check(const struct xfrm_state *x, 1412 const xfrm_address_t *daddr, const xfrm_address_t *saddr) 1413 { 1414 if (ipv6_addr_equal((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) && 1415 (ipv6_addr_equal((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr) || 1416 ipv6_addr_any((struct in6_addr *)saddr) || 1417 ipv6_addr_any((struct in6_addr *)&x->props.saddr))) 1418 return 1; 1419 return 0; 1420 } 1421 1422 static __inline__ int 1423 xfrm_state_addr_check(const struct xfrm_state *x, 1424 const xfrm_address_t *daddr, const xfrm_address_t *saddr, 1425 unsigned short family) 1426 { 1427 switch (family) { 1428 case AF_INET: 1429 return __xfrm4_state_addr_check(x, daddr, saddr); 1430 case AF_INET6: 1431 return __xfrm6_state_addr_check(x, daddr, saddr); 1432 } 1433 return 0; 1434 } 1435 1436 static __inline__ int 1437 xfrm_state_addr_flow_check(const struct xfrm_state *x, const struct flowi *fl, 1438 unsigned short family) 1439 { 1440 switch (family) { 1441 case AF_INET: 1442 return __xfrm4_state_addr_check(x, 1443 (const xfrm_address_t *)&fl->u.ip4.daddr, 1444 (const xfrm_address_t *)&fl->u.ip4.saddr); 1445 case AF_INET6: 1446 return __xfrm6_state_addr_check(x, 1447 (const xfrm_address_t *)&fl->u.ip6.daddr, 1448 (const xfrm_address_t *)&fl->u.ip6.saddr); 1449 } 1450 return 0; 1451 } 1452 1453 static inline int xfrm_state_kern(const struct xfrm_state *x) 1454 { 1455 return atomic_read(&x->tunnel_users); 1456 } 1457 1458 static inline bool xfrm_id_proto_valid(u8 proto) 1459 { 1460 switch (proto) { 1461 case IPPROTO_AH: 1462 case IPPROTO_ESP: 1463 case IPPROTO_COMP: 1464 #if IS_ENABLED(CONFIG_IPV6) 1465 case IPPROTO_ROUTING: 1466 case IPPROTO_DSTOPTS: 1467 #endif 1468 return true; 1469 default: 1470 return false; 1471 } 1472 } 1473 1474 /* IPSEC_PROTO_ANY only matches 3 IPsec protocols, 0 could match all. */ 1475 static inline int xfrm_id_proto_match(u8 proto, u8 userproto) 1476 { 1477 return (!userproto || proto == userproto || 1478 (userproto == IPSEC_PROTO_ANY && (proto == IPPROTO_AH || 1479 proto == IPPROTO_ESP || 1480 proto == IPPROTO_COMP))); 1481 } 1482 1483 /* 1484 * xfrm algorithm information 1485 */ 1486 struct xfrm_algo_aead_info { 1487 char *geniv; 1488 u16 icv_truncbits; 1489 }; 1490 1491 struct xfrm_algo_auth_info { 1492 u16 icv_truncbits; 1493 u16 icv_fullbits; 1494 }; 1495 1496 struct xfrm_algo_encr_info { 1497 char *geniv; 1498 u16 blockbits; 1499 u16 defkeybits; 1500 }; 1501 1502 struct xfrm_algo_comp_info { 1503 u16 threshold; 1504 }; 1505 1506 struct xfrm_algo_desc { 1507 char *name; 1508 char *compat; 1509 u8 available:1; 1510 u8 pfkey_supported:1; 1511 union { 1512 struct xfrm_algo_aead_info aead; 1513 struct xfrm_algo_auth_info auth; 1514 struct xfrm_algo_encr_info encr; 1515 struct xfrm_algo_comp_info comp; 1516 } uinfo; 1517 struct sadb_alg desc; 1518 }; 1519 1520 /* XFRM protocol handlers. */ 1521 struct xfrm4_protocol { 1522 int (*handler)(struct sk_buff *skb); 1523 int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi, 1524 int encap_type); 1525 int (*cb_handler)(struct sk_buff *skb, int err); 1526 int (*err_handler)(struct sk_buff *skb, u32 info); 1527 1528 struct xfrm4_protocol __rcu *next; 1529 int priority; 1530 }; 1531 1532 struct xfrm6_protocol { 1533 int (*handler)(struct sk_buff *skb); 1534 int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi, 1535 int encap_type); 1536 int (*cb_handler)(struct sk_buff *skb, int err); 1537 int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt, 1538 u8 type, u8 code, int offset, __be32 info); 1539 1540 struct xfrm6_protocol __rcu *next; 1541 int priority; 1542 }; 1543 1544 /* XFRM tunnel handlers. */ 1545 struct xfrm_tunnel { 1546 int (*handler)(struct sk_buff *skb); 1547 int (*cb_handler)(struct sk_buff *skb, int err); 1548 int (*err_handler)(struct sk_buff *skb, u32 info); 1549 1550 struct xfrm_tunnel __rcu *next; 1551 int priority; 1552 }; 1553 1554 struct xfrm6_tunnel { 1555 int (*handler)(struct sk_buff *skb); 1556 int (*cb_handler)(struct sk_buff *skb, int err); 1557 int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt, 1558 u8 type, u8 code, int offset, __be32 info); 1559 struct xfrm6_tunnel __rcu *next; 1560 int priority; 1561 }; 1562 1563 void xfrm_init(void); 1564 void xfrm4_init(void); 1565 int xfrm_state_init(struct net *net); 1566 void xfrm_state_fini(struct net *net); 1567 void xfrm4_state_init(void); 1568 void xfrm4_protocol_init(void); 1569 #ifdef CONFIG_XFRM 1570 int xfrm6_init(void); 1571 void xfrm6_fini(void); 1572 int xfrm6_state_init(void); 1573 void xfrm6_state_fini(void); 1574 int xfrm6_protocol_init(void); 1575 void xfrm6_protocol_fini(void); 1576 #else 1577 static inline int xfrm6_init(void) 1578 { 1579 return 0; 1580 } 1581 static inline void xfrm6_fini(void) 1582 { 1583 ; 1584 } 1585 #endif 1586 1587 #ifdef CONFIG_XFRM_STATISTICS 1588 int xfrm_proc_init(struct net *net); 1589 void xfrm_proc_fini(struct net *net); 1590 #endif 1591 1592 int xfrm_sysctl_init(struct net *net); 1593 #ifdef CONFIG_SYSCTL 1594 void xfrm_sysctl_fini(struct net *net); 1595 #else 1596 static inline void xfrm_sysctl_fini(struct net *net) 1597 { 1598 } 1599 #endif 1600 1601 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto, 1602 struct xfrm_address_filter *filter); 1603 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk, 1604 int (*func)(struct xfrm_state *, int, void*), void *); 1605 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net); 1606 struct xfrm_state *xfrm_state_alloc(struct net *net); 1607 void xfrm_state_free(struct xfrm_state *x); 1608 struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr, 1609 const xfrm_address_t *saddr, 1610 const struct flowi *fl, 1611 struct xfrm_tmpl *tmpl, 1612 struct xfrm_policy *pol, int *err, 1613 unsigned short family, u32 if_id); 1614 struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id, 1615 xfrm_address_t *daddr, 1616 xfrm_address_t *saddr, 1617 unsigned short family, 1618 u8 mode, u8 proto, u32 reqid); 1619 struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi, 1620 unsigned short family); 1621 int xfrm_state_check_expire(struct xfrm_state *x); 1622 void xfrm_state_update_stats(struct net *net); 1623 #ifdef CONFIG_XFRM_OFFLOAD 1624 static inline void xfrm_dev_state_update_stats(struct xfrm_state *x) 1625 { 1626 struct xfrm_dev_offload *xdo = &x->xso; 1627 struct net_device *dev = READ_ONCE(xdo->dev); 1628 1629 if (dev && dev->xfrmdev_ops && 1630 dev->xfrmdev_ops->xdo_dev_state_update_stats) 1631 dev->xfrmdev_ops->xdo_dev_state_update_stats(x); 1632 1633 } 1634 #else 1635 static inline void xfrm_dev_state_update_stats(struct xfrm_state *x) {} 1636 #endif 1637 void xfrm_state_insert(struct xfrm_state *x); 1638 int xfrm_state_add(struct xfrm_state *x); 1639 int xfrm_state_update(struct xfrm_state *x); 1640 struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark, 1641 const xfrm_address_t *daddr, __be32 spi, 1642 u8 proto, unsigned short family); 1643 struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark, 1644 const xfrm_address_t *daddr, 1645 const xfrm_address_t *saddr, 1646 u8 proto, 1647 unsigned short family); 1648 #ifdef CONFIG_XFRM_SUB_POLICY 1649 void xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n, 1650 unsigned short family); 1651 void xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n, 1652 unsigned short family); 1653 #else 1654 static inline void xfrm_tmpl_sort(struct xfrm_tmpl **d, struct xfrm_tmpl **s, 1655 int n, unsigned short family) 1656 { 1657 } 1658 1659 static inline void xfrm_state_sort(struct xfrm_state **d, struct xfrm_state **s, 1660 int n, unsigned short family) 1661 { 1662 } 1663 #endif 1664 1665 struct xfrmk_sadinfo { 1666 u32 sadhcnt; /* current hash bkts */ 1667 u32 sadhmcnt; /* max allowed hash bkts */ 1668 u32 sadcnt; /* current running count */ 1669 }; 1670 1671 struct xfrmk_spdinfo { 1672 u32 incnt; 1673 u32 outcnt; 1674 u32 fwdcnt; 1675 u32 inscnt; 1676 u32 outscnt; 1677 u32 fwdscnt; 1678 u32 spdhcnt; 1679 u32 spdhmcnt; 1680 }; 1681 1682 struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq); 1683 int xfrm_state_delete(struct xfrm_state *x); 1684 int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync); 1685 int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid); 1686 int xfrm_dev_policy_flush(struct net *net, struct net_device *dev, 1687 bool task_valid); 1688 void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si); 1689 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si); 1690 u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq); 1691 int xfrm_init_replay(struct xfrm_state *x, struct netlink_ext_ack *extack); 1692 u32 xfrm_state_mtu(struct xfrm_state *x, int mtu); 1693 int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload, 1694 struct netlink_ext_ack *extack); 1695 int xfrm_init_state(struct xfrm_state *x); 1696 int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type); 1697 int xfrm_input_resume(struct sk_buff *skb, int nexthdr); 1698 int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb, 1699 int (*finish)(struct net *, struct sock *, 1700 struct sk_buff *)); 1701 int xfrm_trans_queue(struct sk_buff *skb, 1702 int (*finish)(struct net *, struct sock *, 1703 struct sk_buff *)); 1704 int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err); 1705 int xfrm_output(struct sock *sk, struct sk_buff *skb); 1706 1707 #if IS_ENABLED(CONFIG_NET_PKTGEN) 1708 int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb); 1709 #endif 1710 1711 void xfrm_local_error(struct sk_buff *skb, int mtu); 1712 int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, 1713 int encap_type); 1714 int xfrm4_transport_finish(struct sk_buff *skb, int async); 1715 int xfrm4_rcv(struct sk_buff *skb); 1716 1717 static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi) 1718 { 1719 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL; 1720 XFRM_SPI_SKB_CB(skb)->family = AF_INET; 1721 XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); 1722 return xfrm_input(skb, nexthdr, spi, 0); 1723 } 1724 1725 int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb); 1726 int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol); 1727 int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol); 1728 int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family); 1729 int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family); 1730 void xfrm4_local_error(struct sk_buff *skb, u32 mtu); 1731 int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi, 1732 struct ip6_tnl *t); 1733 int xfrm6_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi, 1734 int encap_type); 1735 int xfrm6_transport_finish(struct sk_buff *skb, int async); 1736 int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t); 1737 int xfrm6_rcv(struct sk_buff *skb); 1738 int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr, 1739 xfrm_address_t *saddr, u8 proto); 1740 void xfrm6_local_error(struct sk_buff *skb, u32 mtu); 1741 int xfrm6_protocol_register(struct xfrm6_protocol *handler, unsigned char protocol); 1742 int xfrm6_protocol_deregister(struct xfrm6_protocol *handler, unsigned char protocol); 1743 int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family); 1744 int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family); 1745 __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr); 1746 __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr); 1747 int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb); 1748 1749 #ifdef CONFIG_XFRM 1750 void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu); 1751 int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb); 1752 int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb); 1753 struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, 1754 struct sk_buff *skb); 1755 struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head, 1756 struct sk_buff *skb); 1757 int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval, 1758 int optlen); 1759 #else 1760 static inline int xfrm_user_policy(struct sock *sk, int optname, 1761 sockptr_t optval, int optlen) 1762 { 1763 return -ENOPROTOOPT; 1764 } 1765 #endif 1766 1767 struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif, 1768 const xfrm_address_t *saddr, 1769 const xfrm_address_t *daddr, 1770 int family, u32 mark); 1771 1772 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp); 1773 1774 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type); 1775 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk, 1776 int (*func)(struct xfrm_policy *, int, int, void*), 1777 void *); 1778 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net); 1779 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl); 1780 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, 1781 const struct xfrm_mark *mark, 1782 u32 if_id, u8 type, int dir, 1783 struct xfrm_selector *sel, 1784 struct xfrm_sec_ctx *ctx, int delete, 1785 int *err); 1786 struct xfrm_policy *xfrm_policy_byid(struct net *net, 1787 const struct xfrm_mark *mark, u32 if_id, 1788 u8 type, int dir, u32 id, int delete, 1789 int *err); 1790 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid); 1791 void xfrm_policy_hash_rebuild(struct net *net); 1792 u32 xfrm_get_acqseq(void); 1793 int verify_spi_info(u8 proto, u32 min, u32 max, struct netlink_ext_ack *extack); 1794 int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi, 1795 struct netlink_ext_ack *extack); 1796 struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, 1797 u8 mode, u32 reqid, u32 if_id, u8 proto, 1798 const xfrm_address_t *daddr, 1799 const xfrm_address_t *saddr, int create, 1800 unsigned short family); 1801 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol); 1802 1803 #ifdef CONFIG_XFRM_MIGRATE 1804 int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, 1805 const struct xfrm_migrate *m, int num_bundles, 1806 const struct xfrm_kmaddress *k, 1807 const struct xfrm_encap_tmpl *encap); 1808 struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net, 1809 u32 if_id); 1810 struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x, 1811 struct xfrm_migrate *m, 1812 struct xfrm_encap_tmpl *encap); 1813 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, 1814 struct xfrm_migrate *m, int num_bundles, 1815 struct xfrm_kmaddress *k, struct net *net, 1816 struct xfrm_encap_tmpl *encap, u32 if_id, 1817 struct netlink_ext_ack *extack); 1818 #endif 1819 1820 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport); 1821 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid); 1822 int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, 1823 xfrm_address_t *addr); 1824 1825 void xfrm_input_init(void); 1826 int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq); 1827 1828 void xfrm_probe_algs(void); 1829 int xfrm_count_pfkey_auth_supported(void); 1830 int xfrm_count_pfkey_enc_supported(void); 1831 struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx); 1832 struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx); 1833 struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id); 1834 struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id); 1835 struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id); 1836 struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe); 1837 struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe); 1838 struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe); 1839 struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len, 1840 int probe); 1841 1842 static inline bool xfrm6_addr_equal(const xfrm_address_t *a, 1843 const xfrm_address_t *b) 1844 { 1845 return ipv6_addr_equal((const struct in6_addr *)a, 1846 (const struct in6_addr *)b); 1847 } 1848 1849 static inline bool xfrm_addr_equal(const xfrm_address_t *a, 1850 const xfrm_address_t *b, 1851 sa_family_t family) 1852 { 1853 switch (family) { 1854 default: 1855 case AF_INET: 1856 return ((__force u32)a->a4 ^ (__force u32)b->a4) == 0; 1857 case AF_INET6: 1858 return xfrm6_addr_equal(a, b); 1859 } 1860 } 1861 1862 static inline int xfrm_policy_id2dir(u32 index) 1863 { 1864 return index & 7; 1865 } 1866 1867 #ifdef CONFIG_XFRM 1868 void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq); 1869 int xfrm_replay_check(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq); 1870 void xfrm_replay_notify(struct xfrm_state *x, int event); 1871 int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb); 1872 int xfrm_replay_recheck(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq); 1873 1874 static inline int xfrm_aevent_is_on(struct net *net) 1875 { 1876 struct sock *nlsk; 1877 int ret = 0; 1878 1879 rcu_read_lock(); 1880 nlsk = rcu_dereference(net->xfrm.nlsk); 1881 if (nlsk) 1882 ret = netlink_has_listeners(nlsk, XFRMNLGRP_AEVENTS); 1883 rcu_read_unlock(); 1884 return ret; 1885 } 1886 1887 static inline int xfrm_acquire_is_on(struct net *net) 1888 { 1889 struct sock *nlsk; 1890 int ret = 0; 1891 1892 rcu_read_lock(); 1893 nlsk = rcu_dereference(net->xfrm.nlsk); 1894 if (nlsk) 1895 ret = netlink_has_listeners(nlsk, XFRMNLGRP_ACQUIRE); 1896 rcu_read_unlock(); 1897 1898 return ret; 1899 } 1900 #endif 1901 1902 static inline unsigned int aead_len(struct xfrm_algo_aead *alg) 1903 { 1904 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8); 1905 } 1906 1907 static inline unsigned int xfrm_alg_len(const struct xfrm_algo *alg) 1908 { 1909 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8); 1910 } 1911 1912 static inline unsigned int xfrm_alg_auth_len(const struct xfrm_algo_auth *alg) 1913 { 1914 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8); 1915 } 1916 1917 static inline unsigned int xfrm_replay_state_esn_len(struct xfrm_replay_state_esn *replay_esn) 1918 { 1919 return sizeof(*replay_esn) + replay_esn->bmp_len * sizeof(__u32); 1920 } 1921 1922 #ifdef CONFIG_XFRM_MIGRATE 1923 static inline int xfrm_replay_clone(struct xfrm_state *x, 1924 struct xfrm_state *orig) 1925 { 1926 1927 x->replay_esn = kmemdup(orig->replay_esn, 1928 xfrm_replay_state_esn_len(orig->replay_esn), 1929 GFP_KERNEL); 1930 if (!x->replay_esn) 1931 return -ENOMEM; 1932 x->preplay_esn = kmemdup(orig->preplay_esn, 1933 xfrm_replay_state_esn_len(orig->preplay_esn), 1934 GFP_KERNEL); 1935 if (!x->preplay_esn) 1936 return -ENOMEM; 1937 1938 return 0; 1939 } 1940 1941 static inline struct xfrm_algo_aead *xfrm_algo_aead_clone(struct xfrm_algo_aead *orig) 1942 { 1943 return kmemdup(orig, aead_len(orig), GFP_KERNEL); 1944 } 1945 1946 1947 static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig) 1948 { 1949 return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL); 1950 } 1951 1952 static inline struct xfrm_algo_auth *xfrm_algo_auth_clone(struct xfrm_algo_auth *orig) 1953 { 1954 return kmemdup(orig, xfrm_alg_auth_len(orig), GFP_KERNEL); 1955 } 1956 1957 static inline void xfrm_states_put(struct xfrm_state **states, int n) 1958 { 1959 int i; 1960 for (i = 0; i < n; i++) 1961 xfrm_state_put(*(states + i)); 1962 } 1963 1964 static inline void xfrm_states_delete(struct xfrm_state **states, int n) 1965 { 1966 int i; 1967 for (i = 0; i < n; i++) 1968 xfrm_state_delete(*(states + i)); 1969 } 1970 #endif 1971 1972 void __init xfrm_dev_init(void); 1973 1974 #ifdef CONFIG_XFRM_OFFLOAD 1975 void xfrm_dev_resume(struct sk_buff *skb); 1976 void xfrm_dev_backlog(struct softnet_data *sd); 1977 struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again); 1978 int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, 1979 struct xfrm_user_offload *xuo, 1980 struct netlink_ext_ack *extack); 1981 int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp, 1982 struct xfrm_user_offload *xuo, u8 dir, 1983 struct netlink_ext_ack *extack); 1984 bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x); 1985 void xfrm_dev_state_delete(struct xfrm_state *x); 1986 void xfrm_dev_state_free(struct xfrm_state *x); 1987 1988 static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x) 1989 { 1990 struct xfrm_dev_offload *xso = &x->xso; 1991 struct net_device *dev = READ_ONCE(xso->dev); 1992 1993 if (dev && dev->xfrmdev_ops->xdo_dev_state_advance_esn) 1994 dev->xfrmdev_ops->xdo_dev_state_advance_esn(x); 1995 } 1996 1997 static inline bool xfrm_dst_offload_ok(struct dst_entry *dst) 1998 { 1999 struct xfrm_state *x = dst->xfrm; 2000 struct xfrm_dst *xdst; 2001 2002 if (!x || !x->type_offload) 2003 return false; 2004 2005 xdst = (struct xfrm_dst *) dst; 2006 if (!x->xso.offload_handle && !xdst->child->xfrm) 2007 return true; 2008 if (x->xso.offload_handle && (x->xso.dev == xfrm_dst_path(dst)->dev) && 2009 !xdst->child->xfrm) 2010 return true; 2011 2012 return false; 2013 } 2014 2015 static inline void xfrm_dev_policy_delete(struct xfrm_policy *x) 2016 { 2017 struct xfrm_dev_offload *xdo = &x->xdo; 2018 struct net_device *dev = xdo->dev; 2019 2020 if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_policy_delete) 2021 dev->xfrmdev_ops->xdo_dev_policy_delete(x); 2022 } 2023 2024 static inline void xfrm_dev_policy_free(struct xfrm_policy *x) 2025 { 2026 struct xfrm_dev_offload *xdo = &x->xdo; 2027 struct net_device *dev = xdo->dev; 2028 2029 if (dev && dev->xfrmdev_ops) { 2030 if (dev->xfrmdev_ops->xdo_dev_policy_free) 2031 dev->xfrmdev_ops->xdo_dev_policy_free(x); 2032 xdo->dev = NULL; 2033 netdev_put(dev, &xdo->dev_tracker); 2034 } 2035 } 2036 #else 2037 static inline void xfrm_dev_resume(struct sk_buff *skb) 2038 { 2039 } 2040 2041 static inline void xfrm_dev_backlog(struct softnet_data *sd) 2042 { 2043 } 2044 2045 static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again) 2046 { 2047 return skb; 2048 } 2049 2050 static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo, struct netlink_ext_ack *extack) 2051 { 2052 return 0; 2053 } 2054 2055 static inline void xfrm_dev_state_delete(struct xfrm_state *x) 2056 { 2057 } 2058 2059 static inline void xfrm_dev_state_free(struct xfrm_state *x) 2060 { 2061 } 2062 2063 static inline int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp, 2064 struct xfrm_user_offload *xuo, u8 dir, 2065 struct netlink_ext_ack *extack) 2066 { 2067 return 0; 2068 } 2069 2070 static inline void xfrm_dev_policy_delete(struct xfrm_policy *x) 2071 { 2072 } 2073 2074 static inline void xfrm_dev_policy_free(struct xfrm_policy *x) 2075 { 2076 } 2077 2078 static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x) 2079 { 2080 return false; 2081 } 2082 2083 static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x) 2084 { 2085 } 2086 2087 static inline bool xfrm_dst_offload_ok(struct dst_entry *dst) 2088 { 2089 return false; 2090 } 2091 #endif 2092 2093 static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m) 2094 { 2095 if (attrs[XFRMA_MARK]) 2096 memcpy(m, nla_data(attrs[XFRMA_MARK]), sizeof(struct xfrm_mark)); 2097 else 2098 m->v = m->m = 0; 2099 2100 return m->v & m->m; 2101 } 2102 2103 static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m) 2104 { 2105 int ret = 0; 2106 2107 if (m->m | m->v) 2108 ret = nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m); 2109 return ret; 2110 } 2111 2112 static inline __u32 xfrm_smark_get(__u32 mark, struct xfrm_state *x) 2113 { 2114 struct xfrm_mark *m = &x->props.smark; 2115 2116 return (m->v & m->m) | (mark & ~m->m); 2117 } 2118 2119 static inline int xfrm_if_id_put(struct sk_buff *skb, __u32 if_id) 2120 { 2121 int ret = 0; 2122 2123 if (if_id) 2124 ret = nla_put_u32(skb, XFRMA_IF_ID, if_id); 2125 return ret; 2126 } 2127 2128 static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x, 2129 unsigned int family) 2130 { 2131 bool tunnel = false; 2132 2133 switch(family) { 2134 case AF_INET: 2135 if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4) 2136 tunnel = true; 2137 break; 2138 case AF_INET6: 2139 if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6) 2140 tunnel = true; 2141 break; 2142 } 2143 if (tunnel && !(x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL)) 2144 return -EINVAL; 2145 2146 return 0; 2147 } 2148 2149 extern const int xfrm_msg_min[XFRM_NR_MSGTYPES]; 2150 extern const struct nla_policy xfrma_policy[XFRMA_MAX+1]; 2151 2152 struct xfrm_translator { 2153 /* Allocate frag_list and put compat translation there */ 2154 int (*alloc_compat)(struct sk_buff *skb, const struct nlmsghdr *src); 2155 2156 /* Allocate nlmsg with 64-bit translaton of received 32-bit message */ 2157 struct nlmsghdr *(*rcv_msg_compat)(const struct nlmsghdr *nlh, 2158 int maxtype, const struct nla_policy *policy, 2159 struct netlink_ext_ack *extack); 2160 2161 /* Translate 32-bit user_policy from sockptr */ 2162 int (*xlate_user_policy_sockptr)(u8 **pdata32, int optlen); 2163 2164 struct module *owner; 2165 }; 2166 2167 #if IS_ENABLED(CONFIG_XFRM_USER_COMPAT) 2168 extern int xfrm_register_translator(struct xfrm_translator *xtr); 2169 extern int xfrm_unregister_translator(struct xfrm_translator *xtr); 2170 extern struct xfrm_translator *xfrm_get_translator(void); 2171 extern void xfrm_put_translator(struct xfrm_translator *xtr); 2172 #else 2173 static inline struct xfrm_translator *xfrm_get_translator(void) 2174 { 2175 return NULL; 2176 } 2177 static inline void xfrm_put_translator(struct xfrm_translator *xtr) 2178 { 2179 } 2180 #endif 2181 2182 #if IS_ENABLED(CONFIG_IPV6) 2183 static inline bool xfrm6_local_dontfrag(const struct sock *sk) 2184 { 2185 int proto; 2186 2187 if (!sk || sk->sk_family != AF_INET6) 2188 return false; 2189 2190 proto = sk->sk_protocol; 2191 if (proto == IPPROTO_UDP || proto == IPPROTO_RAW) 2192 return inet6_test_bit(DONTFRAG, sk); 2193 2194 return false; 2195 } 2196 #endif 2197 2198 #if (IS_BUILTIN(CONFIG_XFRM_INTERFACE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \ 2199 (IS_MODULE(CONFIG_XFRM_INTERFACE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) 2200 2201 extern struct metadata_dst __percpu *xfrm_bpf_md_dst; 2202 2203 int register_xfrm_interface_bpf(void); 2204 2205 #else 2206 2207 static inline int register_xfrm_interface_bpf(void) 2208 { 2209 return 0; 2210 } 2211 2212 #endif 2213 2214 #if IS_ENABLED(CONFIG_DEBUG_INFO_BTF) 2215 int register_xfrm_state_bpf(void); 2216 #else 2217 static inline int register_xfrm_state_bpf(void) 2218 { 2219 return 0; 2220 } 2221 #endif 2222 2223 int xfrm_nat_keepalive_init(unsigned short family); 2224 void xfrm_nat_keepalive_fini(unsigned short family); 2225 int xfrm_nat_keepalive_net_init(struct net *net); 2226 int xfrm_nat_keepalive_net_fini(struct net *net); 2227 void xfrm_nat_keepalive_state_updated(struct xfrm_state *x); 2228 2229 #endif /* _NET_XFRM_H */ 2230