/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NET_XFRM_H
#define _NET_XFRM_H

#include <linux/compiler.h>
#include <linux/xfrm.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/audit.h>
#include <linux/slab.h>
#include <linux/refcount.h>
#include <linux/sockptr.h>

#include <net/sock.h>
#include <net/dst.h>
#include <net/inet_dscp.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/flow.h>
#include <net/gro_cells.h>

#include <linux/interrupt.h>

#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif

#define XFRM_PROTO_ESP		50
#define XFRM_PROTO_AH		51
#define XFRM_PROTO_COMP		108
#define XFRM_PROTO_IPIP		4
#define XFRM_PROTO_IPV6		41
#define XFRM_PROTO_IPTFS	IPPROTO_AGGFRAG
#define XFRM_PROTO_ROUTING	IPPROTO_ROUTING
#define XFRM_PROTO_DSTOPTS	IPPROTO_DSTOPTS

#define XFRM_ALIGN4(len)	(((len) + 3) & ~3)
#define XFRM_ALIGN8(len)	(((len) + 7) & ~7)
#define MODULE_ALIAS_XFRM_MODE(family, encap) \
	MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
#define MODULE_ALIAS_XFRM_TYPE(family, proto) \
	MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto))
#define MODULE_ALIAS_XFRM_OFFLOAD_TYPE(family, proto) \
	MODULE_ALIAS("xfrm-offload-" __stringify(family) "-" __stringify(proto))

#ifdef CONFIG_XFRM_STATISTICS
#define XFRM_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
#define XFRM_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.xfrm_statistics, field, val)
#else
#define XFRM_INC_STATS(net, field)	((void)(net))
#define XFRM_ADD_STATS(net, field, val)	((void)(net))
#endif


/* Organization of SPD aka "XFRM rules"
   ------------------------------------

   Basic objects:
   - policy rule, struct xfrm_policy (=SPD entry)
   - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
   - instance of a transformer, struct xfrm_state (=SA)
   - template to clone xfrm_state, struct xfrm_tmpl

   SPD is organized as hash table (for policies that meet minimum address prefix
   length setting, net->xfrm.policy_hthresh). Other policies are stored in
   lists, sorted into rbtree ordered by destination and source address networks.
   See net/xfrm/xfrm_policy.c for details.

   (To be compatible with existing pfkeyv2 implementations,
   many rules with priority of 0x7fffffff are allowed to exist and
   such rules are ordered in an unpredictable way, thanks to bsd folks.)

   If "action" is "block", then we prohibit the flow, otherwise:
   if "xfrms_nr" is zero, the flow passes untransformed. Otherwise,
   policy entry has list of up to XFRM_MAX_DEPTH transformations,
   described by templates xfrm_tmpl. Each template is resolved
   to a complete xfrm_state (see below) and we pack bundle of transformations
   to a dst_entry returned to requester.

   dst -. xfrm .-> xfrm_state #1
    |---. child .-> dst -. xfrm .-> xfrm_state #2
                     |---. child .-> dst -. xfrm .-> xfrm_state #3
                                      |---. child .-> NULL

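   For example, code holding the topmost dst_entry of such a bundle can
   visit every state in it by following the child links (illustrative
   sketch only, using xfrm_dst_child() declared later in this header;
   use_state() stands for whatever per-state work the caller does):

	for (dst = bundle; dst; dst = xfrm_dst_child(dst))
		if (dst->xfrm)
			use_state(dst->xfrm);
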
   Resolution of xfrm_tmpl
   -----------------------
   Template contains:
   1. ->mode		Mode: transport or tunnel
   2. ->id.proto	Protocol: AH/ESP/IPCOMP
   3. ->id.daddr	Remote tunnel endpoint, ignored for transport mode.
			Q: allow to resolve security gateway?
   4. ->id.spi		If not zero, static SPI.
   5. ->saddr		Local tunnel endpoint, ignored for transport mode.
   6. ->algos		List of allowed algos. Plain bitmask now.
			Q: ealgos, aalgos, calgos. What a mess...
   7. ->share		Sharing mode.
			Q: how to implement private sharing mode? To add struct sock* to
			flow id?

   Having this template, we search the SAD for entries with appropriate
   mode/proto/algo, permitted by the selector.
   If no appropriate entry is found, it is requested from the key manager.

   PROBLEMS:
   Q: How to find all the bundles referring to a physical path for
      PMTU discovery? Seems, dst should contain list of all parents...
      and enter to infinite locking hierarchy disaster.
      No! It is easier, we will not search for them, let them find us.
      We add genid to each dst plus pointer to genid of raw IP route,
      pmtu disc will update pmtu on raw IP route and increase its genid.
      dst_check() will see this for top level and trigger resyncing
      metrics. Plus, it will be made via sk->sk_dst_cache. Solved.
 */

struct xfrm_state_walk {
	struct list_head	all;
	u8			state;
	u8			dying;
	u8			proto;
	u32			seq;
	struct xfrm_address_filter *filter;
};

enum {
	XFRM_DEV_OFFLOAD_IN = 1,
	XFRM_DEV_OFFLOAD_OUT,
	XFRM_DEV_OFFLOAD_FWD,
};

enum {
	XFRM_DEV_OFFLOAD_UNSPECIFIED,
	XFRM_DEV_OFFLOAD_CRYPTO,
	XFRM_DEV_OFFLOAD_PACKET,
};

enum {
	XFRM_DEV_OFFLOAD_FLAG_ACQ = 1,
};

struct xfrm_dev_offload {
	struct net_device	*dev;
	netdevice_tracker	dev_tracker;
	struct net_device	*real_dev;
	unsigned long		offload_handle;
	u8			dir : 2;
	u8			type : 2;
	u8			flags : 2;
};

struct xfrm_mode {
	u8 encap;
	u8 family;
	u8 flags;
};

/* Flags for xfrm_mode. */
enum {
	XFRM_MODE_FLAG_TUNNEL = 1,
};

enum xfrm_replay_mode {
	XFRM_REPLAY_MODE_LEGACY,
	XFRM_REPLAY_MODE_BMP,
	XFRM_REPLAY_MODE_ESN,
};

/* Full description of state of transformer. */
struct xfrm_state {
	possible_net_t		xs_net;
	union {
		struct hlist_node	gclist;
		struct hlist_node	bydst;
	};
	union {
		struct hlist_node	dev_gclist;
		struct hlist_node	bysrc;
	};
	struct hlist_node	byspi;
	struct hlist_node	byseq;
	struct hlist_node	state_cache;
	struct hlist_node	state_cache_input;

	refcount_t		refcnt;
	spinlock_t		lock;

	u32			pcpu_num;
	struct xfrm_id		id;
	struct xfrm_selector	sel;
	struct xfrm_mark	mark;
	u32			if_id;
	u32			tfcpad;

	u32			genid;

	/* Key manager bits */
	struct xfrm_state_walk	km;

	/* Parameters of this state. */
	struct {
		u32		reqid;
		u8		mode;
		u8		replay_window;
		u8		aalgo, ealgo, calgo;
		u8		flags;
		u16		family;
		xfrm_address_t	saddr;
		int		header_len;
		int		enc_hdr_len;
		int		trailer_len;
		u32		extra_flags;
		struct xfrm_mark	smark;
	} props;

	struct xfrm_lifetime_cfg lft;

	/* Data for transformer */
	struct xfrm_algo_auth	*aalg;
	struct xfrm_algo	*ealg;
	struct xfrm_algo	*calg;
	struct xfrm_algo_aead	*aead;
	const char		*geniv;

	/* mapping change rate limiting */
	__be16			new_mapping_sport;
	u32			new_mapping;	/* seconds */
	u32			mapping_maxage;	/* seconds for input SA */

	/* Data for encapsulator */
	struct xfrm_encap_tmpl	*encap;
	struct sock __rcu	*encap_sk;

	/* NAT keepalive */
	u32			nat_keepalive_interval;	/* seconds */
	time64_t		nat_keepalive_expiration;

	/* Data for care-of address */
	xfrm_address_t		*coaddr;

	/* IPComp needs an IPIP tunnel for handling uncompressed packets */
	struct xfrm_state	*tunnel;

	/* If a tunnel, number of users + 1 */
	atomic_t		tunnel_users;

	/* State for replay detection */
	struct xfrm_replay_state replay;
	struct xfrm_replay_state_esn *replay_esn;

	/* Replay detection state at the time we sent the last notification */
	struct xfrm_replay_state preplay;
	struct xfrm_replay_state_esn *preplay_esn;

	/* replay detection mode */
	enum xfrm_replay_mode	repl_mode;
	/* internal flag that only holds state for delayed aevent at the
	 * moment
	 */
	u32			xflags;

	/* Replay detection notification settings */
	u32			replay_maxage;
	u32			replay_maxdiff;

	/* Replay detection notification timer */
	struct timer_list	rtimer;

	/* Statistics */
	struct xfrm_stats	stats;

	struct xfrm_lifetime_cur curlft;
	struct hrtimer		mtimer;

	struct xfrm_dev_offload	xso;

	/* used to fix curlft->add_time when changing date */
	long			saved_tmo;

	/* Last used time */
	time64_t		lastused;

	struct page_frag	xfrag;

	/* Reference to data common to all the instances of this
	 * transformer. */
	const struct xfrm_type	*type;
	struct xfrm_mode	inner_mode;
	struct xfrm_mode	inner_mode_iaf;
	struct xfrm_mode	outer_mode;

	const struct xfrm_type_offload	*type_offload;

	/* Security context */
	struct xfrm_sec_ctx	*security;

	/* Private data of this transformer, format is opaque,
	 * interpreted by xfrm_type methods. */
	void			*data;
	u8			dir;

	const struct xfrm_mode_cbs	*mode_cbs;
	void			*mode_data;
};

static inline struct net *xs_net(struct xfrm_state *x)
{
	return read_pnet(&x->xs_net);
}

/* xflags - make enum if more show up */
#define XFRM_TIME_DEFER	1
#define XFRM_SOFT_EXPIRE 2

enum {
	XFRM_STATE_VOID,
	XFRM_STATE_ACQ,
	XFRM_STATE_VALID,
	XFRM_STATE_ERROR,
	XFRM_STATE_EXPIRED,
	XFRM_STATE_DEAD
};

/* callback structure passed from either netlink or pfkey */
struct km_event {
	union {
		u32 hard;
		u32 proto;
		u32 byid;
		u32 aevent;
		u32 type;
	} data;

	u32	seq;
	u32	portid;
	u32	event;
	struct net *net;
};

struct xfrm_if_decode_session_result {
	struct net *net;
	u32 if_id;
};

struct xfrm_if_cb {
	bool (*decode_session)(struct sk_buff *skb,
			       unsigned short family,
			       struct xfrm_if_decode_session_result *res);
};

void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
void xfrm_if_unregister_cb(void);

struct xfrm_dst_lookup_params {
	struct net *net;
	dscp_t dscp;
	int oif;
	xfrm_address_t *saddr;
	xfrm_address_t *daddr;
	u32 mark;
	__u8 ipproto;
	union flowi_uli uli;
};

struct net_device;
struct xfrm_type;
struct xfrm_dst;
struct xfrm_policy_afinfo {
	struct dst_ops		*dst_ops;
	struct dst_entry	*(*dst_lookup)(const struct xfrm_dst_lookup_params *params);
	int			(*get_saddr)(xfrm_address_t *saddr,
					     const struct xfrm_dst_lookup_params *params);
	int			(*fill_dst)(struct xfrm_dst *xdst,
					    struct net_device *dev,
					    const struct flowi *fl);
	struct dst_entry	*(*blackhole_route)(struct net *net, struct dst_entry *orig);
};

int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family);
void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo);
void km_policy_notify(struct xfrm_policy *xp, int dir,
		      const struct km_event *c);
void km_state_notify(struct xfrm_state *x, const struct km_event *c);

struct xfrm_tmpl;
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t,
	     struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
int __xfrm_state_delete(struct xfrm_state *x);

struct xfrm_state_afinfo {
	u8			family;
	u8			proto;

	const struct xfrm_type_offload *type_offload_esp;

	const struct xfrm_type	*type_esp;
	const struct xfrm_type	*type_ipip;
	const struct xfrm_type	*type_ipip6;
	const struct xfrm_type	*type_comp;
	const struct xfrm_type	*type_ah;
	const struct xfrm_type	*type_routing;
	const struct xfrm_type	*type_dstopts;

	int	(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
	int	(*transport_finish)(struct sk_buff *skb,
				    int async);
	void	(*local_error)(struct sk_buff *skb, u32 mtu);
};

int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family);

struct xfrm_input_afinfo {
	u8			family;
	bool			is_ipip;
	int			(*callback)(struct sk_buff *skb, u8 protocol,
					    int err);
};

int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo);
int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo);
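/* Illustrative sketch (not a declaration this header provides): a module
 * supplying an IPv4 IP-in-IP input callback would typically register an
 * xfrm_input_afinfo like this; my_ipip_cb is a hypothetical callback with
 * the signature shown in the struct above.
 *
 *	static const struct xfrm_input_afinfo my_ipip_afinfo = {
 *		.family		= AF_INET,
 *		.is_ipip	= true,
 *		.callback	= my_ipip_cb,
 *	};
 *
 *	err = xfrm_input_register_afinfo(&my_ipip_afinfo);
 *	...
 *	xfrm_input_unregister_afinfo(&my_ipip_afinfo);
 */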

void xfrm_flush_gc(void);
void xfrm_state_delete_tunnel(struct xfrm_state *x);

struct xfrm_type {
	struct module		*owner;
	u8			proto;
	u8			flags;
#define XFRM_TYPE_NON_FRAGMENT	1
#define XFRM_TYPE_REPLAY_PROT	2
#define XFRM_TYPE_LOCAL_COADDR	4
#define XFRM_TYPE_REMOTE_COADDR	8

	int			(*init_state)(struct xfrm_state *x,
					      struct netlink_ext_ack *extack);
	void			(*destructor)(struct xfrm_state *);
	int			(*input)(struct xfrm_state *, struct sk_buff *skb);
	int			(*output)(struct xfrm_state *, struct sk_buff *pskb);
	int			(*reject)(struct xfrm_state *, struct sk_buff *,
					  const struct flowi *);
};

int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);

struct xfrm_type_offload {
	struct module	*owner;
	u8		proto;
	void		(*encap)(struct xfrm_state *, struct sk_buff *pskb);
	int		(*input_tail)(struct xfrm_state *x, struct sk_buff *skb);
	int		(*xmit)(struct xfrm_state *, struct sk_buff *pskb, netdev_features_t features);
};

int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family);
void xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family);

/**
 * struct xfrm_mode_cbs - XFRM mode callbacks
 * @owner: module owner or NULL
 * @init_state: Add/init mode specific state in `xfrm_state *x`
 * @clone_state: Copy mode specific values from `orig` to new state `x`
 * @destroy_state: Cleanup mode specific state from `xfrm_state *x`
 * @user_init: Process mode specific netlink attributes from user
 * @copy_to_user: Add netlink attributes to `attrs` based on state in `x`
 * @sa_len: Return space required to store mode specific netlink attributes
 * @get_inner_mtu: Return avail payload space after removing encap overhead
 * @input: Process received packet from SA using mode
 * @output: Output given packet using mode
 * @prepare_output: Add mode specific encapsulation to packet in skb. On return
 *	`transport_header` should point at ESP header, `network_header` should
 *	point at outer IP header and `mac_header` should point at the
 *	protocol/nexthdr field of the outer IP.
 *
 * One should examine and understand the specific uses of these callbacks in
 * xfrm for further detail on how and when these functions are called. RTSL.
 */
struct xfrm_mode_cbs {
	struct module	*owner;
	int	(*init_state)(struct xfrm_state *x);
	int	(*clone_state)(struct xfrm_state *x, struct xfrm_state *orig);
	void	(*destroy_state)(struct xfrm_state *x);
	int	(*user_init)(struct net *net, struct xfrm_state *x,
			     struct nlattr **attrs,
			     struct netlink_ext_ack *extack);
	int	(*copy_to_user)(struct xfrm_state *x, struct sk_buff *skb);
	unsigned int	(*sa_len)(const struct xfrm_state *x);
	u32	(*get_inner_mtu)(struct xfrm_state *x, int outer_mtu);
	int	(*input)(struct xfrm_state *x, struct sk_buff *skb);
	int	(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
	int	(*prepare_output)(struct xfrm_state *x, struct sk_buff *skb);
};

int xfrm_register_mode_cbs(u8 mode, const struct xfrm_mode_cbs *mode_cbs);
void xfrm_unregister_mode_cbs(u8 mode);

static inline int xfrm_af2proto(unsigned int family)
{
	switch(family) {
	case AF_INET:
		return IPPROTO_IPIP;
	case AF_INET6:
		return IPPROTO_IPV6;
	default:
		return 0;
	}
}

static inline const struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto)
{
	if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
	    (ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6))
		return &x->inner_mode;
	else
		return &x->inner_mode_iaf;
}

struct xfrm_tmpl {
	/* id in template is interpreted as:
	 * daddr - destination of tunnel, may be zero for transport mode.
	 * spi   - zero to acquire spi. Not zero if spi is static, then
	 *	   daddr must be fixed too.
	 * proto - AH/ESP/IPCOMP
	 */
	struct xfrm_id		id;

	/* Source address of tunnel. Ignored, if it is not a tunnel. */
	xfrm_address_t		saddr;

	unsigned short		encap_family;

	u32			reqid;

	/* Mode: transport, tunnel etc. */
	u8			mode;

	/* Sharing mode: unique, this session only, this user only etc. */
	u8			share;

	/* May skip this transformation if no SA is found */
	u8			optional;

	/* Skip aalgos/ealgos/calgos checks. */
	u8			allalgs;

	/* Bit mask of algos allowed for acquisition */
	u32			aalgos;
	u32			ealgos;
	u32			calgos;
};

#define XFRM_MAX_DEPTH		6
#define XFRM_MAX_OFFLOAD_DEPTH	1

struct xfrm_policy_walk_entry {
	struct list_head	all;
	u8			dead;
};

struct xfrm_policy_walk {
	struct xfrm_policy_walk_entry walk;
	u8 type;
	u32 seq;
};

struct xfrm_policy_queue {
	struct sk_buff_head	hold_queue;
	struct timer_list	hold_timer;
	unsigned long		timeout;
};

/**
 * struct xfrm_policy - xfrm policy
 * @xp_net: network namespace the policy lives in
 * @bydst: hlist node for SPD hash table or rbtree list
 * @byidx: hlist node for index hash table
 * @state_cache_list: hlist head for policy cached xfrm states
 * @lock: serialize changes to policy structure members
 * @refcnt: reference count, freed once it reaches 0
 * @pos: kernel internal tie-breaker to determine age of policy
 * @timer: timer
 * @genid: generation, used to invalidate old policies
 * @priority: priority, set by userspace
 * @index: policy index (autogenerated)
 * @if_id: virtual xfrm interface id
 * @mark: packet mark
 * @selector: selector
 * @lft: lifetime configuration data
 * @curlft: lifetime state
 * @walk: list head on pernet policy list
 * @polq: queue to hold packets while acquire operation in progress
 * @bydst_reinsert: policy tree node needs to be merged
 * @type: XFRM_POLICY_TYPE_MAIN or _SUB
 * @action: XFRM_POLICY_ALLOW or _BLOCK
 * @flags: XFRM_POLICY_LOCALOK, XFRM_POLICY_ICMP
 * @xfrm_nr: number of used templates in @xfrm_vec
 * @family: protocol family
 * @security: SELinux security label
 * @xfrm_vec: array of templates to resolve state
 * @rcu: rcu head, used to defer memory release
 * @xdo: hardware offload state
 */
struct xfrm_policy {
	possible_net_t		xp_net;
	struct hlist_node	bydst;
	struct hlist_node	byidx;

	struct hlist_head	state_cache_list;

	/* This lock only affects elements except for entry. */
	rwlock_t		lock;
	refcount_t		refcnt;
	u32			pos;
	struct timer_list	timer;

	atomic_t		genid;
	u32			priority;
	u32			index;
	u32			if_id;
	struct xfrm_mark	mark;
	struct xfrm_selector	selector;
	struct xfrm_lifetime_cfg lft;
	struct xfrm_lifetime_cur curlft;
	struct xfrm_policy_walk_entry walk;
	struct xfrm_policy_queue polq;
	bool			bydst_reinsert;
	u8			type;
	u8			action;
	u8			flags;
	u8			xfrm_nr;
	u16			family;
	struct xfrm_sec_ctx	*security;
	struct xfrm_tmpl	xfrm_vec[XFRM_MAX_DEPTH];
	struct rcu_head		rcu;

	struct xfrm_dev_offload	xdo;
};

static inline struct net *xp_net(const struct xfrm_policy *xp)
{
	return read_pnet(&xp->xp_net);
}

struct xfrm_kmaddress {
	xfrm_address_t		local;
	xfrm_address_t		remote;
	u32			reserved;
	u16			family;
};

struct xfrm_migrate {
	xfrm_address_t		old_daddr;
	xfrm_address_t		old_saddr;
	xfrm_address_t		new_daddr;
	xfrm_address_t		new_saddr;
	u8			proto;
	u8			mode;
	u16			reserved;
	u32			reqid;
	u16			old_family;
	u16			new_family;
};

#define XFRM_KM_TIMEOUT		30
/* what happened */
#define XFRM_REPLAY_UPDATE	XFRM_AE_CR
#define XFRM_REPLAY_TIMEOUT	XFRM_AE_CE

/* default aevent timeout in units of 100ms */
#define XFRM_AE_ETIME		10
/* Async Event timer multiplier */
#define XFRM_AE_ETH_M		10
/* default seq threshold size */
#define XFRM_AE_SEQT_SIZE	2

struct xfrm_mgr {
	struct list_head	list;
	int			(*notify)(struct xfrm_state *x, const struct km_event *c);
	int			(*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp);
	struct xfrm_policy	*(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
	int			(*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
	int			(*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c);
	int			(*report)(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
	int			(*migrate)(const struct xfrm_selector *sel,
					   u8 dir, u8 type,
					   const struct xfrm_migrate *m,
					   int num_bundles,
					   const struct xfrm_kmaddress *k,
					   const struct xfrm_encap_tmpl *encap);
	bool			(*is_alive)(const struct km_event *c);
};

void xfrm_register_km(struct xfrm_mgr *km);
void xfrm_unregister_km(struct xfrm_mgr *km);

struct xfrm_tunnel_skb_cb {
	union {
		struct inet_skb_parm h4;
		struct inet6_skb_parm h6;
	} header;

	union {
		struct ip_tunnel *ip4;
		struct ip6_tnl *ip6;
	} tunnel;
};

#define XFRM_TUNNEL_SKB_CB(__skb) ((struct xfrm_tunnel_skb_cb *)&((__skb)->cb[0]))

/*
 * This structure is used for the duration where packets are being
 * transformed by IPsec. As soon as the packet leaves IPsec the
 * area beyond the generic IP part may be overwritten.
 */
struct xfrm_skb_cb {
	struct xfrm_tunnel_skb_cb header;

	/* Sequence number for replay protection. */
	union {
		struct {
			__u32 low;
			__u32 hi;
		} output;
		struct {
			__be32 low;
			__be32 hi;
		} input;
	} seq;
};

#define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0]))

/*
 * This structure is used by the afinfo prepare_input/prepare_output functions
 * to transmit header information to the mode input/output functions.
 */
struct xfrm_mode_skb_cb {
	struct xfrm_tunnel_skb_cb header;

	/* Copied from header for IPv4, always set to zero and DF for IPv6. */
	__be16 id;
	__be16 frag_off;

	/* IP header length (excluding options or extension headers). */
	u8 ihl;

	/* TOS for IPv4, class for IPv6. */
	u8 tos;

	/* TTL for IPv4, hop limit for IPv6. */
	u8 ttl;

	/* Protocol for IPv4, NH for IPv6. */
	u8 protocol;

	/* Option length for IPv4, zero for IPv6. */
	u8 optlen;

	/* Used by IPv6 only, zero for IPv4. */
	u8 flow_lbl[3];
};

#define XFRM_MODE_SKB_CB(__skb) ((struct xfrm_mode_skb_cb *)&((__skb)->cb[0]))

/*
 * This structure is used by the input processing to locate the SPI and
 * related information.
 */
struct xfrm_spi_skb_cb {
	struct xfrm_tunnel_skb_cb header;

	unsigned int daddroff;
	unsigned int family;
	__be32 seq;
};

#define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))

#ifdef CONFIG_AUDITSYSCALL
static inline struct audit_buffer *xfrm_audit_start(const char *op)
{
	struct audit_buffer *audit_buf = NULL;

	if (audit_enabled == AUDIT_OFF)
		return NULL;
	audit_buf = audit_log_start(audit_context(), GFP_ATOMIC,
				    AUDIT_MAC_IPSEC_EVENT);
	if (audit_buf == NULL)
		return NULL;
	audit_log_format(audit_buf, "op=%s", op);
	return audit_buf;
}

static inline void xfrm_audit_helper_usrinfo(bool task_valid,
					     struct audit_buffer *audit_buf)
{
	const unsigned int auid = from_kuid(&init_user_ns, task_valid ?
					    audit_get_loginuid(current) :
					    INVALID_UID);
	const unsigned int ses = task_valid ? audit_get_sessionid(current) :
		AUDIT_SID_UNSET;

	audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses);
	audit_log_task_context(audit_buf);
}

void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid);
void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      bool task_valid);
void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid);
void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid);
void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
				      struct sk_buff *skb);
void xfrm_audit_state_replay(struct xfrm_state *x, struct sk_buff *skb,
			     __be32 net_seq);
void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, __be32 net_spi,
			       __be32 net_seq);
void xfrm_audit_state_icvfail(struct xfrm_state *x, struct sk_buff *skb,
			      u8 proto);
#else

static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
					 bool task_valid)
{
}

static inline void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
					    bool task_valid)
{
}

static inline void xfrm_audit_state_add(struct xfrm_state *x, int result,
					bool task_valid)
{
}

static inline void xfrm_audit_state_delete(struct xfrm_state *x, int result,
					   bool task_valid)
{
}

static inline void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
						     struct sk_buff *skb)
{
}

static inline void xfrm_audit_state_replay(struct xfrm_state *x,
					   struct sk_buff *skb, __be32 net_seq)
{
}

static inline void xfrm_audit_state_notfound_simple(struct sk_buff *skb,
						     u16 family)
{
}

static inline void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
					     __be32 net_spi, __be32 net_seq)
{
}

static inline void xfrm_audit_state_icvfail(struct xfrm_state *x,
					    struct sk_buff *skb, u8 proto)
{
}
#endif /* CONFIG_AUDITSYSCALL */

static inline void xfrm_pol_hold(struct xfrm_policy *policy)
{
	if (likely(policy != NULL))
		refcount_inc(&policy->refcnt);
}

void xfrm_policy_destroy(struct xfrm_policy *policy);

static inline void xfrm_pol_put(struct xfrm_policy *policy)
{
	if (refcount_dec_and_test(&policy->refcnt))
		xfrm_policy_destroy(policy);
}

static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
{
	int i;
	for (i = npols - 1; i >= 0; --i)
		xfrm_pol_put(pols[i]);
}

void __xfrm_state_destroy(struct xfrm_state *, bool);

static inline void __xfrm_state_put(struct xfrm_state *x)
{
	refcount_dec(&x->refcnt);
}

static inline void xfrm_state_put(struct xfrm_state *x)
{
	if (refcount_dec_and_test(&x->refcnt))
		__xfrm_state_destroy(x, false);
}

static inline void xfrm_state_put_sync(struct xfrm_state *x)
{
	if (refcount_dec_and_test(&x->refcnt))
		__xfrm_state_destroy(x, true);
}

static inline void xfrm_state_hold(struct xfrm_state *x)
{
	refcount_inc(&x->refcnt);
}

static inline bool addr_match(const void *token1, const void *token2,
			      unsigned int prefixlen)
{
	const __be32 *a1 = token1;
	const __be32 *a2 = token2;
	unsigned int pdw;
	unsigned int pbi;

	pdw = prefixlen >> 5;	  /* num of whole u32 in prefix */
	pbi = prefixlen &  0x1f;  /* num of bits in incomplete u32 in prefix */

	if (pdw)
		if (memcmp(a1, a2, pdw << 2))
			return false;

	if (pbi) {
		__be32 mask;

		mask = htonl((0xffffffff) << (32 - pbi));

		if ((a1[pdw] ^ a2[pdw]) & mask)
			return false;
	}

	return true;
}

static inline bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen)
{
	/* C99 6.5.7 (3): u32 << 32 is undefined behaviour */
	if (sizeof(long) == 4 && prefixlen == 0)
		return true;
	return !((a1 ^ a2) & htonl(~0UL << (32 - prefixlen)));
}

static __inline__
__be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli)
{
	__be16 port;
	switch(fl->flowi_proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
	case IPPROTO_SCTP:
		port = uli->ports.sport;
		break;
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		port = htons(uli->icmpt.type);
		break;
	case IPPROTO_MH:
		port = htons(uli->mht.type);
		break;
	case IPPROTO_GRE:
		port = htons(ntohl(uli->gre_key) >> 16);
		break;
	default:
		port = 0;	/*XXX*/
	}
	return port;
}

static __inline__
__be16 xfrm_flowi_dport(const struct flowi *fl, const union flowi_uli *uli)
{
	__be16 port;
	switch(fl->flowi_proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
	case IPPROTO_SCTP:
		port = uli->ports.dport;
		break;
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		port = htons(uli->icmpt.code);
		break;
	case IPPROTO_GRE:
		port = htons(ntohl(uli->gre_key) & 0xffff);
		break;
	default:
		port = 0;	/*XXX*/
	}
	return port;
}

bool xfrm_selector_match(const struct xfrm_selector *sel,
			 const struct flowi *fl, unsigned short family);

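/* For example (illustrative only): ICMP has no ports, so the selector
 * "ports" returned by the helpers above carry type and code instead.
 * For an ICMPv4 echo request (type 8, code 0) described by a flowi:
 *
 *	xfrm_flowi_sport(fl, &fl->u.ip4.uli) == htons(8)
 *	xfrm_flowi_dport(fl, &fl->u.ip4.uli) == htons(0)
 */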
#ifdef CONFIG_SECURITY_NETWORK_XFRM
/* If neither has a context --> match
 * Otherwise, both must have a context and the sids, doi, alg must match
 */
static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
{
	return ((!s1 && !s2) ||
		(s1 && s2 &&
		 (s1->ctx_sid == s2->ctx_sid) &&
		 (s1->ctx_doi == s2->ctx_doi) &&
		 (s1->ctx_alg == s2->ctx_alg)));
}
#else
static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
{
	return true;
}
#endif

/* A struct encoding bundle of transformations to apply to some set of flows.
 *
 * xdst->child points to the next element of bundle.
 * dst->xfrm points to an instance of a transformer.
 *
 * Due to unfortunate limitations of current routing cache, which we
 * have no time to fix, it mirrors struct rtable and is bound to the same
 * routing key, including saddr,daddr. However, we can have many
 * bundles differing by session id. All the bundles grow from a parent
 * policy rule.
 */
struct xfrm_dst {
	union {
		struct dst_entry	dst;
		struct rtable		rt;
		struct rt6_info		rt6;
	} u;
	struct dst_entry *route;
	struct dst_entry *child;
	struct dst_entry *path;
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
	int num_pols, num_xfrms;
	u32 xfrm_genid;
	u32 policy_genid;
	u32 route_mtu_cached;
	u32 child_mtu_cached;
	u32 route_cookie;
	u32 path_cookie;
};

static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
{
#ifdef CONFIG_XFRM
	if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
		const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst;

		return xdst->path;
	}
#endif
	return (struct dst_entry *) dst;
}

static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
{
#ifdef CONFIG_XFRM
	if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
		struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
		return xdst->child;
	}
#endif
	return NULL;
}

#ifdef CONFIG_XFRM
static inline void xfrm_dst_set_child(struct xfrm_dst *xdst, struct dst_entry *child)
{
	xdst->child = child;
}

static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
{
	xfrm_pols_put(xdst->pols, xdst->num_pols);
	dst_release(xdst->route);
	if (likely(xdst->u.dst.xfrm))
		xfrm_state_put(xdst->u.dst.xfrm);
}
#endif

void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);

struct xfrm_if_parms {
	int link;		/* ifindex of underlying L2 interface */
	u32 if_id;		/* interface identifier */
	bool collect_md;
};

struct xfrm_if {
	struct xfrm_if __rcu *next;	/* next interface in list */
	struct net_device *dev;		/* virtual device associated with interface */
	struct net *net;		/* netns for packet i/o */
	struct xfrm_if_parms p;		/* interface parms */

	struct gro_cells gro_cells;
};

struct xfrm_offload {
	/* Output sequence number for replay protection on offloading. */
	struct {
		__u32 low;
		__u32 hi;
	} seq;

	__u32			flags;
#define	SA_DELETE_REQ		1
#define	CRYPTO_DONE		2
#define	CRYPTO_NEXT_DONE	4
#define	CRYPTO_FALLBACK		8
#define	XFRM_GSO_SEGMENT	16
#define	XFRM_GRO		32
/* 64 is free */
#define	XFRM_DEV_RESUME		128
#define	XFRM_XMIT		256

	__u32			status;
#define CRYPTO_SUCCESS				1
#define CRYPTO_GENERIC_ERROR			2
#define CRYPTO_TRANSPORT_AH_AUTH_FAILED		4
#define CRYPTO_TRANSPORT_ESP_AUTH_FAILED	8
#define CRYPTO_TUNNEL_AH_AUTH_FAILED		16
#define CRYPTO_TUNNEL_ESP_AUTH_FAILED		32
#define CRYPTO_INVALID_PACKET_SYNTAX		64
#define CRYPTO_INVALID_PROTOCOL			128

	/* Used to keep whole l2 header for transport mode GRO */
	__u32			orig_mac_len;

	__u8			proto;
	__u8			inner_ipproto;
};

struct sec_path {
	int			len;
	int			olen;
	int			verified_cnt;

	struct xfrm_state	*xvec[XFRM_MAX_DEPTH];
	struct xfrm_offload	ovec[XFRM_MAX_OFFLOAD_DEPTH];
};

struct sec_path *secpath_set(struct sk_buff *skb);

static inline void
secpath_reset(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	skb_ext_del(skb, SKB_EXT_SEC_PATH);
#endif
}

static inline int
xfrm_addr_any(const xfrm_address_t *addr, unsigned short family)
{
	switch (family) {
	case AF_INET:
		return addr->a4 == 0;
	case AF_INET6:
		return ipv6_addr_any(&addr->in6);
	}
	return 0;
}

static inline int
__xfrm4_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
{
	return	(tmpl->saddr.a4 &&
		 tmpl->saddr.a4 != x->props.saddr.a4);
}

static inline int
__xfrm6_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
{
	return	(!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
		 !ipv6_addr_equal((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
}

static inline int
xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_state_addr_cmp(tmpl, x);
	case AF_INET6:
		return __xfrm6_state_addr_cmp(tmpl, x);
	}
	return !0;
}

#ifdef CONFIG_XFRM
static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
{
	struct sec_path *sp = skb_sec_path(skb);

	return sp->xvec[sp->len - 1];
}
#endif

static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	struct sec_path *sp = skb_sec_path(skb);

	if (!sp || !sp->olen || sp->len != sp->olen)
		return NULL;

	return &sp->ovec[sp->olen - 1];
#else
	return NULL;
#endif
}

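/* Illustrative sketch: a driver implementing crypto offload can use the
 * accessors above on its transmit path to detect an IPsec packet and find
 * the SA it belongs to (error handling omitted; my_hw_setup_crypto() is a
 * hypothetical driver function):
 *
 *	struct xfrm_offload *xo = xfrm_offload(skb);
 *
 *	if (xo) {
 *		struct xfrm_state *x = xfrm_input_state(skb);
 *
 *		my_hw_setup_crypto(x, xo);
 *	}
 */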
#ifdef CONFIG_XFRM
int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
			unsigned short family);

static inline bool __xfrm_check_nopolicy(struct net *net, struct sk_buff *skb,
					 int dir)
{
	if (!net->xfrm.policy_count[dir] && !secpath_exists(skb))
		return net->xfrm.policy_default[dir] == XFRM_USERPOLICY_ACCEPT;

	return false;
}

static inline bool __xfrm_check_dev_nopolicy(struct sk_buff *skb,
					     int dir, unsigned short family)
{
	if (dir != XFRM_POLICY_OUT && family == AF_INET) {
		/* same dst may be used for traffic originating from
		 * devices with different policy settings.
		 */
		return IPCB(skb)->flags & IPSKB_NOPOLICY;
	}
	return skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY);
}

static inline int __xfrm_policy_check2(struct sock *sk, int dir,
				       struct sk_buff *skb,
				       unsigned int family, int reverse)
{
	struct net *net = dev_net(skb->dev);
	int ndir = dir | (reverse ? XFRM_POLICY_MASK + 1 : 0);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct xfrm_state *x;

	if (sk && sk->sk_policy[XFRM_POLICY_IN])
		return __xfrm_policy_check(sk, ndir, skb, family);

	if (xo) {
		x = xfrm_input_state(skb);
		if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
			return (xo->flags & CRYPTO_DONE) &&
			       (xo->status & CRYPTO_SUCCESS);
	}

	return __xfrm_check_nopolicy(net, skb, dir) ||
	       __xfrm_check_dev_nopolicy(skb, dir, family) ||
	       __xfrm_policy_check(sk, ndir, skb, family);
}

static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
{
	return __xfrm_policy_check2(sk, dir, skb, family, 0);
}

static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
{
	return xfrm_policy_check(sk, dir, skb, AF_INET);
}

static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
{
	return xfrm_policy_check(sk, dir, skb, AF_INET6);
}

static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
					     struct sk_buff *skb)
{
	return __xfrm_policy_check2(sk, dir, skb, AF_INET, 1);
}

static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
					     struct sk_buff *skb)
{
	return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
}

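/* Illustrative sketch: an IPv4 transport protocol gates its receive path on
 * the inbound policy with the helpers above before accepting a packet,
 * e.g.:
 *
 *	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
 *		goto drop;
 */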
int __xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl,
			  unsigned int family, int reverse);

static inline int xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl,
				      unsigned int family)
{
	return __xfrm_decode_session(net, skb, fl, family, 0);
}

static inline int xfrm_decode_session_reverse(struct net *net, struct sk_buff *skb,
					      struct flowi *fl,
					      unsigned int family)
{
	return __xfrm_decode_session(net, skb, fl, family, 1);
}

int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);

static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct net *net = dev_net(skb->dev);

	if (!net->xfrm.policy_count[XFRM_POLICY_OUT] &&
	    net->xfrm.policy_default[XFRM_POLICY_OUT] == XFRM_USERPOLICY_ACCEPT)
		return true;

	return (skb_dst(skb)->flags & DST_NOXFRM) ||
	       __xfrm_route_forward(skb, family);
}

static inline int xfrm4_route_forward(struct sk_buff *skb)
{
	return xfrm_route_forward(skb, AF_INET);
}

static inline int xfrm6_route_forward(struct sk_buff *skb)
{
	return xfrm_route_forward(skb, AF_INET6);
}

int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk);

static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
{
	if (!sk_fullsock(osk))
		return 0;
	sk->sk_policy[0] = NULL;
	sk->sk_policy[1] = NULL;
	if (unlikely(osk->sk_policy[0] || osk->sk_policy[1]))
		return __xfrm_sk_clone_policy(sk, osk);
	return 0;
}

int xfrm_policy_delete(struct xfrm_policy *pol, int dir);

static inline void xfrm_sk_free_policy(struct sock *sk)
{
	struct xfrm_policy *pol;

	pol = rcu_dereference_protected(sk->sk_policy[0], 1);
	if (unlikely(pol != NULL)) {
		xfrm_policy_delete(pol, XFRM_POLICY_MAX);
		sk->sk_policy[0] = NULL;
	}
	pol = rcu_dereference_protected(sk->sk_policy[1], 1);
	if (unlikely(pol != NULL)) {
		xfrm_policy_delete(pol, XFRM_POLICY_MAX+1);
		sk->sk_policy[1] = NULL;
	}
}

#else

static inline void xfrm_sk_free_policy(struct sock *sk) {}
static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) { return 0; }
static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
{
	return 1;
}
static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
{
	return 1;
}
static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
{
	return 1;
}
static inline int xfrm_decode_session_reverse(struct net *net, struct sk_buff *skb,
					      struct flowi *fl,
					      unsigned int family)
{
	return -ENOSYS;
}
static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
					     struct sk_buff *skb)
{
	return 1;
}
static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
					     struct sk_buff *skb)
{
	return 1;
}
#endif

static __inline__
xfrm_address_t *xfrm_flowi_daddr(const struct flowi *fl, unsigned short family)
{
	switch (family){
	case AF_INET:
		return (xfrm_address_t *)&fl->u.ip4.daddr;
	case AF_INET6:
		return (xfrm_address_t *)&fl->u.ip6.daddr;
	}
	return NULL;
}

static __inline__
xfrm_address_t *xfrm_flowi_saddr(const struct flowi *fl, unsigned short family)
{
	switch (family){
	case AF_INET:
		return (xfrm_address_t *)&fl->u.ip4.saddr;
	case AF_INET6:
		return (xfrm_address_t *)&fl->u.ip6.saddr;
	}
	return NULL;
}

static __inline__
void xfrm_flowi_addr_get(const struct flowi *fl,
			 xfrm_address_t *saddr, xfrm_address_t *daddr,
			 unsigned short family)
{
	switch(family) {
	case AF_INET:
		memcpy(&saddr->a4, &fl->u.ip4.saddr, sizeof(saddr->a4));
		memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
		break;
	case AF_INET6:
		saddr->in6 = fl->u.ip6.saddr;
		daddr->in6 = fl->u.ip6.daddr;
		break;
	}
}

static __inline__ int
__xfrm4_state_addr_check(const struct xfrm_state *x,
			 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
{
	if (daddr->a4 == x->id.daddr.a4 &&
	    (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
		return 1;
	return 0;
}

static __inline__ int
__xfrm6_state_addr_check(const struct xfrm_state *x,
			 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
{
	if (ipv6_addr_equal((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
	    (ipv6_addr_equal((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr) ||
	     ipv6_addr_any((struct in6_addr *)saddr) ||
	     ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
		return 1;
	return 0;
}

static __inline__ int
xfrm_state_addr_check(const struct xfrm_state *x,
		      const xfrm_address_t *daddr, const xfrm_address_t *saddr,
		      unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_state_addr_check(x, daddr, saddr);
	case AF_INET6:
		return __xfrm6_state_addr_check(x, daddr, saddr);
	}
	return 0;
}

static __inline__ int
xfrm_state_addr_flow_check(const struct xfrm_state *x, const struct flowi *fl,
			   unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_state_addr_check(x,
						(const xfrm_address_t *)&fl->u.ip4.daddr,
						(const xfrm_address_t *)&fl->u.ip4.saddr);
	case AF_INET6:
		return __xfrm6_state_addr_check(x,
						(const xfrm_address_t *)&fl->u.ip6.daddr,
						(const xfrm_address_t *)&fl->u.ip6.saddr);
	}
	return 0;
}

static inline int xfrm_state_kern(const struct xfrm_state *x)
{
	return atomic_read(&x->tunnel_users);
}

static inline bool xfrm_id_proto_valid(u8 proto)
{
	switch (proto) {
	case IPPROTO_AH:
	case IPPROTO_ESP:
	case IPPROTO_COMP:
#if IS_ENABLED(CONFIG_IPV6)
	case IPPROTO_ROUTING:
	case IPPROTO_DSTOPTS:
#endif
		return true;
	default:
		return false;
	}
}

/* IPSEC_PROTO_ANY only matches 3 IPsec protocols, 0 could match all. */
static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
{
	return (!userproto || proto == userproto ||
		(userproto == IPSEC_PROTO_ANY && (proto == IPPROTO_AH ||
						  proto == IPPROTO_ESP ||
						  proto == IPPROTO_COMP)));
}

/*
 * xfrm algorithm information
 */
struct xfrm_algo_aead_info {
	char *geniv;
	u16 icv_truncbits;
};

struct xfrm_algo_auth_info {
	u16 icv_truncbits;
	u16 icv_fullbits;
};

struct xfrm_algo_encr_info {
	char *geniv;
	u16 blockbits;
	u16 defkeybits;
};

struct xfrm_algo_comp_info {
	u16 threshold;
};

struct xfrm_algo_desc {
	char *name;
	char *compat;
	u8 available:1;
	u8 pfkey_supported:1;
	union {
		struct xfrm_algo_aead_info aead;
		struct xfrm_algo_auth_info auth;
		struct xfrm_algo_encr_info encr;
		struct xfrm_algo_comp_info comp;
	} uinfo;
	struct sadb_alg desc;
};

/* XFRM protocol handlers.  */
struct xfrm4_protocol {
	int (*handler)(struct sk_buff *skb);
	int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
			     int encap_type);
	int (*cb_handler)(struct sk_buff *skb, int err);
	int (*err_handler)(struct sk_buff *skb, u32 info);

	struct xfrm4_protocol __rcu *next;
	int priority;
};

struct xfrm6_protocol {
	int (*handler)(struct sk_buff *skb);
	int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
			     int encap_type);
	int (*cb_handler)(struct sk_buff *skb, int err);
	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
			   u8 type, u8 code, int offset, __be32 info);

	struct xfrm6_protocol __rcu *next;
	int priority;
};

/* XFRM tunnel handlers.  */
struct xfrm_tunnel {
	int (*handler)(struct sk_buff *skb);
	int (*cb_handler)(struct sk_buff *skb, int err);
	int (*err_handler)(struct sk_buff *skb, u32 info);

	struct xfrm_tunnel __rcu *next;
	int priority;
};

struct xfrm6_tunnel {
	int (*handler)(struct sk_buff *skb);
	int (*cb_handler)(struct sk_buff *skb, int err);
	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
			   u8 type, u8 code, int offset, __be32 info);
	struct xfrm6_tunnel __rcu *next;
	int priority;
};

void xfrm_init(void);
void xfrm4_init(void);
int xfrm_state_init(struct net *net);
void xfrm_state_fini(struct net *net);
void xfrm4_state_init(void);
void xfrm4_protocol_init(void);
#ifdef CONFIG_XFRM
int xfrm6_init(void);
void xfrm6_fini(void);
int xfrm6_state_init(void);
void xfrm6_state_fini(void);
int xfrm6_protocol_init(void);
void xfrm6_protocol_fini(void);
#else
static inline int xfrm6_init(void)
{
	return 0;
}
static inline void xfrm6_fini(void)
{
	;
}
#endif

#ifdef CONFIG_XFRM_STATISTICS
int xfrm_proc_init(struct net *net);
void xfrm_proc_fini(struct net *net);
#endif

int xfrm_sysctl_init(struct net *net);
#ifdef CONFIG_SYSCTL
void xfrm_sysctl_fini(struct net *net);
#else
static inline void xfrm_sysctl_fini(struct net *net)
{
}
#endif

void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
			  struct xfrm_address_filter *filter);
int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
		    int (*func)(struct xfrm_state *, int, void*), void *);
void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
struct xfrm_state *xfrm_state_alloc(struct net *net);
void xfrm_state_free(struct xfrm_state *x);
struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
				   const xfrm_address_t *saddr,
				   const struct flowi *fl,
				   struct xfrm_tmpl *tmpl,
				   struct xfrm_policy *pol, int *err,
				   unsigned short family, u32 if_id);
struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
				       xfrm_address_t *daddr,
				       xfrm_address_t *saddr,
				       unsigned short family,
				       u8 mode, u8 proto, u32 reqid);
struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
					   unsigned short family);
int xfrm_state_check_expire(struct xfrm_state *x);
void xfrm_state_update_stats(struct net *net);
#ifdef CONFIG_XFRM_OFFLOAD
static inline void xfrm_dev_state_update_stats(struct xfrm_state *x)
{
	struct xfrm_dev_offload *xdo = &x->xso;
	struct net_device *dev = READ_ONCE(xdo->dev);

	if (dev && dev->xfrmdev_ops &&
	    dev->xfrmdev_ops->xdo_dev_state_update_stats)
		dev->xfrmdev_ops->xdo_dev_state_update_stats(x);

}
#else
static inline void xfrm_dev_state_update_stats(struct xfrm_state *x) {}
#endif
void xfrm_state_insert(struct xfrm_state *x);
int xfrm_state_add(struct xfrm_state *x);
int xfrm_state_update(struct xfrm_state *x);
struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
				     const xfrm_address_t *daddr, __be32 spi,
				     u8 proto, unsigned short family);
struct xfrm_state *xfrm_input_state_lookup(struct net *net, u32 mark,
					   const xfrm_address_t *daddr,
					   __be32 spi, u8 proto,
					   unsigned short family);
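/* Illustrative sketch: the receive path resolves the SA for an inbound ESP
 * packet from (mark, daddr, SPI, proto, family) with xfrm_state_lookup(),
 * roughly:
 *
 *	x = xfrm_state_lookup(net, skb->mark, &daddr, spi,
 *			      IPPROTO_ESP, AF_INET);
 *	if (!x)
 *		goto drop;
 */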
struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
					    const xfrm_address_t *daddr,
					    const xfrm_address_t *saddr,
					    u8 proto,
					    unsigned short family);
#ifdef CONFIG_XFRM_SUB_POLICY
void xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
		    unsigned short family);
void xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
		     unsigned short family);
#else
static inline void xfrm_tmpl_sort(struct xfrm_tmpl **d, struct xfrm_tmpl **s,
				  int n, unsigned short family)
{
}

static inline void xfrm_state_sort(struct xfrm_state **d, struct xfrm_state **s,
				   int n, unsigned short family)
{
}
#endif

struct xfrmk_sadinfo {
	u32 sadhcnt; /* current hash bkts */
	u32 sadhmcnt; /* max allowed hash bkts */
	u32 sadcnt; /* current running count */
};

struct xfrmk_spdinfo {
	u32 incnt;
	u32 outcnt;
	u32 fwdcnt;
	u32 inscnt;
	u32 outscnt;
	u32 fwdscnt;
	u32 spdhcnt;
	u32 spdhmcnt;
};

struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num);
int xfrm_state_delete(struct xfrm_state *x);
int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
			  bool task_valid);
void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
int xfrm_init_replay(struct xfrm_state *x, struct netlink_ext_ack *extack);
u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload,
		      struct netlink_ext_ack *extack);
int xfrm_init_state(struct xfrm_state *x);
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
			 int (*finish)(struct net *, struct sock *,
				       struct sk_buff *));
int xfrm_trans_queue(struct sk_buff *skb,
		     int (*finish)(struct net *, struct sock *,
				   struct sk_buff *));
int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err);
int xfrm_output(struct sock *sk, struct sk_buff *skb);

#if IS_ENABLED(CONFIG_NET_PKTGEN)
int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb);
#endif

void xfrm_local_error(struct sk_buff *skb, int mtu);
int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
		    int encap_type);
int xfrm4_transport_finish(struct sk_buff *skb, int async);
int xfrm4_rcv(struct sk_buff *skb);

static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
{
	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
	return xfrm_input(skb, nexthdr, spi, 0);
}

int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol);
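/* Illustrative sketch: an ESP implementation hooks into the IPv4 receive
 * path by registering an xfrm4_protocol; the rcv/err callbacks named here
 * are the caller's own:
 *
 *	static struct xfrm4_protocol my_esp4_protocol = {
 *		.handler	= xfrm4_rcv,
 *		.input_handler	= xfrm_input,
 *		.cb_handler	= my_esp4_rcv_cb,
 *		.err_handler	= my_esp4_err,
 *		.priority	= 0,
 *	};
 *
 *	err = xfrm4_protocol_register(&my_esp4_protocol, IPPROTO_ESP);
 */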
int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
		  struct ip6_tnl *t);
int xfrm6_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
		    int encap_type);
int xfrm6_transport_finish(struct sk_buff *skb, int async);
int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t);
int xfrm6_rcv(struct sk_buff *skb);
int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
		     xfrm_address_t *saddr, u8 proto);
void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
int xfrm6_protocol_register(struct xfrm6_protocol *handler, unsigned char protocol);
int xfrm6_protocol_deregister(struct xfrm6_protocol *handler, unsigned char protocol);
int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
__be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
__be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb);

#ifdef CONFIG_XFRM
void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu);
int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
					struct sk_buff *skb);
struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
					struct sk_buff *skb);
int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval,
		     int optlen);
#else
static inline int xfrm_user_policy(struct sock *sk, int optname,
				   sockptr_t optval, int optlen)
{
	return -ENOPROTOOPT;
}
#endif

struct dst_entry *__xfrm_dst_lookup(int family, const struct xfrm_dst_lookup_params *params);

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);

void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
		     int (*func)(struct xfrm_policy *, int, int, void*),
		     void *);
void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
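/* Illustrative sketch: the walk API above is driven in three steps; the
 * callback (my_dump_one here, hypothetical) is invoked once per policy and
 * returns 0 to continue:
 *
 *	struct xfrm_policy_walk walk;
 *
 *	xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_MAIN);
 *	err = xfrm_policy_walk(net, &walk, my_dump_one, arg);
 *	xfrm_policy_walk_done(&walk, net);
 */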
int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);

#ifdef CONFIG_XFRM_MIGRATE
int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
	       const struct xfrm_migrate *m, int num_bundles,
	       const struct xfrm_kmaddress *k,
	       const struct xfrm_encap_tmpl *encap);
struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net,
					   u32 if_id);
struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
				      struct xfrm_migrate *m,
				      struct xfrm_encap_tmpl *encap);
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_bundles,
		 struct xfrm_kmaddress *k, struct net *net,
		 struct xfrm_encap_tmpl *encap, u32 if_id,
		 struct netlink_ext_ack *extack);
#endif

int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid);
int km_report(struct net *net, u8 proto, struct xfrm_selector *sel,
	      xfrm_address_t *addr);

void xfrm_input_init(void);
int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);

void xfrm_probe_algs(void);
int xfrm_count_pfkey_auth_supported(void);
int xfrm_count_pfkey_enc_supported(void);
struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
					    int probe);

static inline bool xfrm6_addr_equal(const xfrm_address_t *a,
				    const xfrm_address_t *b)
{
	return ipv6_addr_equal((const struct in6_addr *)a,
			       (const struct in6_addr *)b);
}

static inline bool xfrm_addr_equal(const xfrm_address_t *a,
				   const xfrm_address_t *b,
				   sa_family_t family)
{
	switch (family) {
	default:
	case AF_INET:
		return ((__force u32)a->a4 ^ (__force u32)b->a4) == 0;
	case AF_INET6:
		return xfrm6_addr_equal(a, b);
	}
}

static inline int xfrm_policy_id2dir(u32 index)
{
	return index & 7;
}

#ifdef CONFIG_XFRM
void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq);
int xfrm_replay_check(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);
void xfrm_replay_notify(struct xfrm_state *x, int event);
int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb);
int xfrm_replay_recheck(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);

static inline int xfrm_aevent_is_on(struct net *net)
{
	struct sock *nlsk;
	int ret = 0;

	rcu_read_lock();
	nlsk = rcu_dereference(net->xfrm.nlsk);
	if (nlsk)
		ret = netlink_has_listeners(nlsk, XFRMNLGRP_AEVENTS);
	rcu_read_unlock();
	return ret;
}

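/*
 * Like xfrm_aevent_is_on() above, xfrm_acquire_is_on() below only peeks at
 * net->xfrm.nlsk under rcu_read_lock() and asks netlink whether anyone is
 * subscribed to the group, so callers can skip building notification
 * messages that nobody would receive.
 */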
static inline int xfrm_acquire_is_on(struct net *net)
{
	struct sock *nlsk;
	int ret = 0;

	rcu_read_lock();
	nlsk = rcu_dereference(net->xfrm.nlsk);
	if (nlsk)
		ret = netlink_has_listeners(nlsk, XFRMNLGRP_ACQUIRE);
	rcu_read_unlock();

	return ret;
}
#endif

static inline unsigned int aead_len(struct xfrm_algo_aead *alg)
{
	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
}

static inline unsigned int xfrm_alg_len(const struct xfrm_algo *alg)
{
	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
}

static inline unsigned int xfrm_alg_auth_len(const struct xfrm_algo_auth *alg)
{
	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
}

static inline unsigned int xfrm_replay_state_esn_len(struct xfrm_replay_state_esn *replay_esn)
{
	return sizeof(*replay_esn) + replay_esn->bmp_len * sizeof(__u32);
}

#ifdef CONFIG_XFRM_MIGRATE
static inline int xfrm_replay_clone(struct xfrm_state *x,
				    struct xfrm_state *orig)
{
	x->replay_esn = kmemdup(orig->replay_esn,
				xfrm_replay_state_esn_len(orig->replay_esn),
				GFP_KERNEL);
	if (!x->replay_esn)
		return -ENOMEM;
	x->preplay_esn = kmemdup(orig->preplay_esn,
				 xfrm_replay_state_esn_len(orig->preplay_esn),
				 GFP_KERNEL);
	if (!x->preplay_esn)
		return -ENOMEM;

	return 0;
}

static inline struct xfrm_algo_aead *xfrm_algo_aead_clone(struct xfrm_algo_aead *orig)
{
	return kmemdup(orig, aead_len(orig), GFP_KERNEL);
}

static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
{
	return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
}

static inline struct xfrm_algo_auth *xfrm_algo_auth_clone(struct xfrm_algo_auth *orig)
{
	return kmemdup(orig, xfrm_alg_auth_len(orig), GFP_KERNEL);
}

static inline void xfrm_states_put(struct xfrm_state **states, int n)
{
	int i;
	for (i = 0; i < n; i++)
		xfrm_state_put(*(states + i));
}

static inline void xfrm_states_delete(struct xfrm_state **states, int n)
{
	int i;
	for (i = 0; i < n; i++)
		xfrm_state_delete(*(states + i));
}
#endif

void __init xfrm_dev_init(void);

#ifdef CONFIG_XFRM_OFFLOAD
void xfrm_dev_resume(struct sk_buff *skb);
void xfrm_dev_backlog(struct softnet_data *sd);
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
		       struct xfrm_user_offload *xuo,
		       struct netlink_ext_ack *extack);
int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
			struct xfrm_user_offload *xuo, u8 dir,
			struct netlink_ext_ack *extack);
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
void xfrm_dev_state_delete(struct xfrm_state *x);
void xfrm_dev_state_free(struct xfrm_state *x);

static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
{
	struct xfrm_dev_offload *xso = &x->xso;
	struct net_device *dev = READ_ONCE(xso->dev);

	if (dev && dev->xfrmdev_ops->xdo_dev_state_advance_esn)
		dev->xfrmdev_ops->xdo_dev_state_advance_esn(x);
}

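/*
 * xfrm_dst_offload_ok() reports whether a bundle can stay on the offload
 * path: the state must have an offload type, the child dst must not carry
 * a further transform (single-transform bundle), and if the state is bound
 * to an offload device that device has to be the one the underlying route
 * uses (xfrm_dst_path(dst)->dev).
 */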
static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
{
	struct xfrm_state *x = dst->xfrm;
	struct xfrm_dst *xdst;

	if (!x || !x->type_offload)
		return false;

	xdst = (struct xfrm_dst *) dst;
	if (!x->xso.offload_handle && !xdst->child->xfrm)
		return true;
	if (x->xso.offload_handle && (x->xso.dev == xfrm_dst_path(dst)->dev) &&
	    !xdst->child->xfrm)
		return true;

	return false;
}

static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
{
	struct xfrm_dev_offload *xdo = &x->xdo;
	struct net_device *dev = xdo->dev;

	if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_policy_delete)
		dev->xfrmdev_ops->xdo_dev_policy_delete(x);
}

static inline void xfrm_dev_policy_free(struct xfrm_policy *x)
{
	struct xfrm_dev_offload *xdo = &x->xdo;
	struct net_device *dev = xdo->dev;

	if (dev && dev->xfrmdev_ops) {
		if (dev->xfrmdev_ops->xdo_dev_policy_free)
			dev->xfrmdev_ops->xdo_dev_policy_free(x);
		xdo->dev = NULL;
		netdev_put(dev, &xdo->dev_tracker);
	}
}
#else
static inline void xfrm_dev_resume(struct sk_buff *skb)
{
}

static inline void xfrm_dev_backlog(struct softnet_data *sd)
{
}

static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
	return skb;
}

static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo, struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void xfrm_dev_state_delete(struct xfrm_state *x)
{
}

static inline void xfrm_dev_state_free(struct xfrm_state *x)
{
}

static inline int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
				      struct xfrm_user_offload *xuo, u8 dir,
				      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
{
}

static inline void xfrm_dev_policy_free(struct xfrm_policy *x)
{
}

static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	return false;
}

static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
{
}

static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
{
	return false;
}
#endif

static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
{
	if (attrs[XFRMA_MARK])
		memcpy(m, nla_data(attrs[XFRMA_MARK]), sizeof(struct xfrm_mark));
	else
		m->v = m->m = 0;

	return m->v & m->m;
}

static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
{
	int ret = 0;

	if (m->m | m->v)
		ret = nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
	return ret;
}

static inline __u32 xfrm_smark_get(__u32 mark, struct xfrm_state *x)
{
	struct xfrm_mark *m = &x->props.smark;

	return (m->v & m->m) | (mark & ~m->m);
}

static inline int xfrm_if_id_put(struct sk_buff *skb, __u32 if_id)
{
	int ret = 0;

	if (if_id)
		ret = nla_put_u32(skb, XFRMA_IF_ID, if_id);
	return ret;
}

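/*
 * Mark handling in the helpers above, worked through on illustrative
 * numbers: xfrm_mark_get() copies the XFRMA_MARK attribute (or zeroes the
 * mark) and returns v & m, i.e. non-zero only if a usable value/mask pair
 * was supplied.  xfrm_smark_get() merges the state's output mark into the
 * packet mark, taking the masked bits from the state and the rest from the
 * original mark:
 *
 *	v = 0x0a00, m = 0xff00, mark = 0x00c3
 *	(v & m) | (mark & ~m) = 0x0a00 | 0x00c3 = 0x0ac3
 */
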
static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
				    unsigned int family)
{
	bool tunnel = false;

	switch (family) {
	case AF_INET:
		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
			tunnel = true;
		break;
	case AF_INET6:
		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
			tunnel = true;
		break;
	}
	if (tunnel && !(x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL))
		return -EINVAL;

	return 0;
}

extern const int xfrm_msg_min[XFRM_NR_MSGTYPES];
extern const struct nla_policy xfrma_policy[XFRMA_MAX+1];

struct xfrm_translator {
	/* Allocate frag_list and put compat translation there */
	int (*alloc_compat)(struct sk_buff *skb, const struct nlmsghdr *src);

	/* Allocate nlmsg with 64-bit translation of received 32-bit message */
	struct nlmsghdr *(*rcv_msg_compat)(const struct nlmsghdr *nlh,
					   int maxtype, const struct nla_policy *policy,
					   struct netlink_ext_ack *extack);

	/* Translate 32-bit user_policy from sockptr */
	int (*xlate_user_policy_sockptr)(u8 **pdata32, int optlen);

	struct module *owner;
};

#if IS_ENABLED(CONFIG_XFRM_USER_COMPAT)
extern int xfrm_register_translator(struct xfrm_translator *xtr);
extern int xfrm_unregister_translator(struct xfrm_translator *xtr);
extern struct xfrm_translator *xfrm_get_translator(void);
extern void xfrm_put_translator(struct xfrm_translator *xtr);
#else
static inline struct xfrm_translator *xfrm_get_translator(void)
{
	return NULL;
}
static inline void xfrm_put_translator(struct xfrm_translator *xtr)
{
}
#endif

#if IS_ENABLED(CONFIG_IPV6)
static inline bool xfrm6_local_dontfrag(const struct sock *sk)
{
	int proto;

	if (!sk || sk->sk_family != AF_INET6)
		return false;

	proto = sk->sk_protocol;
	if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
		return inet6_test_bit(DONTFRAG, sk);

	return false;
}
#endif

#if (IS_BUILTIN(CONFIG_XFRM_INTERFACE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \
    (IS_MODULE(CONFIG_XFRM_INTERFACE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))

extern struct metadata_dst __percpu *xfrm_bpf_md_dst;

int register_xfrm_interface_bpf(void);

#else

static inline int register_xfrm_interface_bpf(void)
{
	return 0;
}

#endif

#if IS_ENABLED(CONFIG_DEBUG_INFO_BTF)
int register_xfrm_state_bpf(void);
#else
static inline int register_xfrm_state_bpf(void)
{
	return 0;
}
#endif

int xfrm_nat_keepalive_init(unsigned short family);
void xfrm_nat_keepalive_fini(unsigned short family);
int xfrm_nat_keepalive_net_init(struct net *net);
int xfrm_nat_keepalive_net_fini(struct net *net);
void xfrm_nat_keepalive_state_updated(struct xfrm_state *x);

#endif /* _NET_XFRM_H */