1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _NET_XFRM_H
3 #define _NET_XFRM_H
4
5 #include <linux/compiler.h>
6 #include <linux/xfrm.h>
7 #include <linux/spinlock.h>
8 #include <linux/list.h>
9 #include <linux/skbuff.h>
10 #include <linux/socket.h>
11 #include <linux/pfkeyv2.h>
12 #include <linux/ipsec.h>
13 #include <linux/in6.h>
14 #include <linux/mutex.h>
15 #include <linux/audit.h>
16 #include <linux/slab.h>
17 #include <linux/refcount.h>
18 #include <linux/sockptr.h>
19
20 #include <net/sock.h>
21 #include <net/dst.h>
22 #include <net/inet_dscp.h>
23 #include <net/ip.h>
24 #include <net/route.h>
25 #include <net/ipv6.h>
26 #include <net/ip6_fib.h>
27 #include <net/flow.h>
28 #include <net/gro_cells.h>
29
30 #include <linux/interrupt.h>
31
32 #ifdef CONFIG_XFRM_STATISTICS
33 #include <net/snmp.h>
34 #endif
35
36 #define XFRM_PROTO_ESP 50
37 #define XFRM_PROTO_AH 51
38 #define XFRM_PROTO_COMP 108
39 #define XFRM_PROTO_IPIP 4
40 #define XFRM_PROTO_IPV6 41
41 #define XFRM_PROTO_IPTFS IPPROTO_AGGFRAG
42 #define XFRM_PROTO_ROUTING IPPROTO_ROUTING
43 #define XFRM_PROTO_DSTOPTS IPPROTO_DSTOPTS
44
45 #define XFRM_ALIGN4(len) (((len) + 3) & ~3)
46 #define XFRM_ALIGN8(len) (((len) + 7) & ~7)
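/* For illustration: XFRM_ALIGN4(13) == 16 and XFRM_ALIGN8(5) == 8; a length
 * that is already aligned (e.g. XFRM_ALIGN4(8) == 8) is returned unchanged.
 */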
47 #define MODULE_ALIAS_XFRM_MODE(family, encap) \
48 MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
49 #define MODULE_ALIAS_XFRM_TYPE(family, proto) \
50 MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto))
51 #define MODULE_ALIAS_XFRM_OFFLOAD_TYPE(family, proto) \
52 MODULE_ALIAS("xfrm-offload-" __stringify(family) "-" __stringify(proto))
53
54 #ifdef CONFIG_XFRM_STATISTICS
55 #define XFRM_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
56 #define XFRM_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.xfrm_statistics, field, val)
57 #else
58 #define XFRM_INC_STATS(net, field) ((void)(net))
59 #define XFRM_ADD_STATS(net, field, val) ((void)(net))
60 #endif
61
62
63 /* Organization of SPD aka "XFRM rules"
64 ------------------------------------
65
66 Basic objects:
67 - policy rule, struct xfrm_policy (=SPD entry)
68 - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
69 - instance of a transformer, struct xfrm_state (=SA)
70 - template to clone xfrm_state, struct xfrm_tmpl
71
72 SPD is organized as hash table (for policies that meet minimum address prefix
73 length setting, net->xfrm.policy_hthresh). Other policies are stored in
74 lists, sorted into rbtree ordered by destination and source address networks.
75 See net/xfrm/xfrm_policy.c for details.
76
77 (To be compatible with existing pfkeyv2 implementations,
78 many rules with priority of 0x7fffffff are allowed to exist and
79 such rules are ordered in an unpredictable way, thanks to bsd folks.)
80
81 If "action" is "block", then we prohibit the flow, otherwise:
82 if "xfrms_nr" is zero, the flow passes untransformed. Otherwise,
83 policy entry has list of up to XFRM_MAX_DEPTH transformations,
84 described by templates xfrm_tmpl. Each template is resolved
85    to a complete xfrm_state (see below) and we pack the bundle of transformations
86    into a dst_entry returned to the requester.
87
88 dst -. xfrm .-> xfrm_state #1
89 |---. child .-> dst -. xfrm .-> xfrm_state #2
90 |---. child .-> dst -. xfrm .-> xfrm_state #3
91 |---. child .-> NULL
92
93
94    Resolution of xfrm_tmpl
95 -----------------------
96 Template contains:
97 1. ->mode Mode: transport or tunnel
98 2. ->id.proto Protocol: AH/ESP/IPCOMP
99 3. ->id.daddr Remote tunnel endpoint, ignored for transport mode.
100 Q: allow to resolve security gateway?
101 4. ->id.spi If not zero, static SPI.
102 5. ->saddr Local tunnel endpoint, ignored for transport mode.
103 6. ->algos List of allowed algos. Plain bitmask now.
104 Q: ealgos, aalgos, calgos. What a mess...
105 7. ->share Sharing mode.
106 Q: how to implement private sharing mode? To add struct sock* to
107 flow id?
108
109    Having this template, we search the SAD for entries
110 with appropriate mode/proto/algo, permitted by selector.
111    If no appropriate entry is found, one is requested from the key manager.
112
113 PROBLEMS:
114 Q: How to find all the bundles referring to a physical path for
115 PMTU discovery? Seems, dst should contain list of all parents...
116 and enter to infinite locking hierarchy disaster.
117 No! It is easier, we will not search for them, let them find us.
118 We add genid to each dst plus pointer to genid of raw IP route,
119 pmtu disc will update pmtu on raw IP route and increase its genid.
120 dst_check() will see this for top level and trigger resyncing
121 metrics. Plus, it will be made via sk->sk_dst_cache. Solved.
122 */
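
/* Illustrative sketch only (not upstream code): given a resolved bundle as in
 * the diagram above, the per-SA transformations can be visited with the
 * xfrm_dst_child() helper defined later in this header, stopping at the first
 * dst that carries no xfrm state.  "bundle_dst" and "inspect_state" below are
 * hypothetical placeholders:
 *
 *	for (dst = bundle_dst; dst && dst->xfrm; dst = xfrm_dst_child(dst))
 *		inspect_state(dst->xfrm);
 */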
123
124 struct xfrm_state_walk {
125 struct list_head all;
126 u8 state;
127 u8 dying;
128 u8 proto;
129 u32 seq;
130 struct xfrm_address_filter *filter;
131 };
132
133 enum {
134 XFRM_DEV_OFFLOAD_IN = 1,
135 XFRM_DEV_OFFLOAD_OUT,
136 XFRM_DEV_OFFLOAD_FWD,
137 };
138
139 enum {
140 XFRM_DEV_OFFLOAD_UNSPECIFIED,
141 XFRM_DEV_OFFLOAD_CRYPTO,
142 XFRM_DEV_OFFLOAD_PACKET,
143 };
144
145 enum {
146 XFRM_DEV_OFFLOAD_FLAG_ACQ = 1,
147 };
148
149 struct xfrm_dev_offload {
150 struct net_device *dev;
151 netdevice_tracker dev_tracker;
152 struct net_device *real_dev;
153 unsigned long offload_handle;
154 u8 dir : 2;
155 u8 type : 2;
156 u8 flags : 2;
157 };
158
159 struct xfrm_mode {
160 u8 encap;
161 u8 family;
162 u8 flags;
163 };
164
165 /* Flags for xfrm_mode. */
166 enum {
167 XFRM_MODE_FLAG_TUNNEL = 1,
168 };
169
170 enum xfrm_replay_mode {
171 XFRM_REPLAY_MODE_LEGACY,
172 XFRM_REPLAY_MODE_BMP,
173 XFRM_REPLAY_MODE_ESN,
174 };
175
176 /* Full description of state of transformer. */
177 struct xfrm_state {
178 possible_net_t xs_net;
179 union {
180 struct hlist_node gclist;
181 struct hlist_node bydst;
182 };
183 union {
184 struct hlist_node dev_gclist;
185 struct hlist_node bysrc;
186 };
187 struct hlist_node byspi;
188 struct hlist_node byseq;
189 struct hlist_node state_cache;
190 struct hlist_node state_cache_input;
191
192 refcount_t refcnt;
193 spinlock_t lock;
194
195 u32 pcpu_num;
196 struct xfrm_id id;
197 struct xfrm_selector sel;
198 struct xfrm_mark mark;
199 u32 if_id;
200 u32 tfcpad;
201
202 u32 genid;
203
204 /* Key manager bits */
205 struct xfrm_state_walk km;
206
207 /* Parameters of this state. */
208 struct {
209 u32 reqid;
210 u8 mode;
211 u8 replay_window;
212 u8 aalgo, ealgo, calgo;
213 u8 flags;
214 u16 family;
215 xfrm_address_t saddr;
216 int header_len;
217 int enc_hdr_len;
218 int trailer_len;
219 u32 extra_flags;
220 struct xfrm_mark smark;
221 } props;
222
223 struct xfrm_lifetime_cfg lft;
224
225 /* Data for transformer */
226 struct xfrm_algo_auth *aalg;
227 struct xfrm_algo *ealg;
228 struct xfrm_algo *calg;
229 struct xfrm_algo_aead *aead;
230 const char *geniv;
231
232 /* mapping change rate limiting */
233 __be16 new_mapping_sport;
234 u32 new_mapping; /* seconds */
235 u32 mapping_maxage; /* seconds for input SA */
236
237 /* Data for encapsulator */
238 struct xfrm_encap_tmpl *encap;
239 struct sock __rcu *encap_sk;
240
241 /* NAT keepalive */
242 u32 nat_keepalive_interval; /* seconds */
243 time64_t nat_keepalive_expiration;
244
245 /* Data for care-of address */
246 xfrm_address_t *coaddr;
247
248 /* IPComp needs an IPIP tunnel for handling uncompressed packets */
249 struct xfrm_state *tunnel;
250
251 /* If a tunnel, number of users + 1 */
252 atomic_t tunnel_users;
253
254 /* State for replay detection */
255 struct xfrm_replay_state replay;
256 struct xfrm_replay_state_esn *replay_esn;
257
258 /* Replay detection state at the time we sent the last notification */
259 struct xfrm_replay_state preplay;
260 struct xfrm_replay_state_esn *preplay_esn;
261
262 /* replay detection mode */
263 enum xfrm_replay_mode repl_mode;
264 /* internal flag that only holds state for delayed aevent at the
265 * moment
266 */
267 u32 xflags;
268
269 /* Replay detection notification settings */
270 u32 replay_maxage;
271 u32 replay_maxdiff;
272
273 /* Replay detection notification timer */
274 struct timer_list rtimer;
275
276 /* Statistics */
277 struct xfrm_stats stats;
278
279 struct xfrm_lifetime_cur curlft;
280 struct hrtimer mtimer;
281
282 struct xfrm_dev_offload xso;
283
284 /* used to fix curlft->add_time when changing date */
285 long saved_tmo;
286
287 /* Last used time */
288 time64_t lastused;
289
290 struct page_frag xfrag;
291
292 /* Reference to data common to all the instances of this
293 * transformer. */
294 const struct xfrm_type *type;
295 struct xfrm_mode inner_mode;
296 struct xfrm_mode inner_mode_iaf;
297 struct xfrm_mode outer_mode;
298
299 const struct xfrm_type_offload *type_offload;
300
301 /* Security context */
302 struct xfrm_sec_ctx *security;
303
304 /* Private data of this transformer, format is opaque,
305 * interpreted by xfrm_type methods. */
306 void *data;
307 u8 dir;
308
309 const struct xfrm_mode_cbs *mode_cbs;
310 void *mode_data;
311 };
312
313 static inline struct net *xs_net(struct xfrm_state *x)
314 {
315 return read_pnet(&x->xs_net);
316 }
317
318 /* xflags - make enum if more show up */
319 #define XFRM_TIME_DEFER 1
320 #define XFRM_SOFT_EXPIRE 2
321
322 enum {
323 XFRM_STATE_VOID,
324 XFRM_STATE_ACQ,
325 XFRM_STATE_VALID,
326 XFRM_STATE_ERROR,
327 XFRM_STATE_EXPIRED,
328 XFRM_STATE_DEAD
329 };
330
331 /* callback structure passed from either netlink or pfkey */
332 struct km_event {
333 union {
334 u32 hard;
335 u32 proto;
336 u32 byid;
337 u32 aevent;
338 u32 type;
339 } data;
340
341 u32 seq;
342 u32 portid;
343 u32 event;
344 struct net *net;
345 };
346
347 struct xfrm_if_decode_session_result {
348 struct net *net;
349 u32 if_id;
350 };
351
352 struct xfrm_if_cb {
353 bool (*decode_session)(struct sk_buff *skb,
354 unsigned short family,
355 struct xfrm_if_decode_session_result *res);
356 };
357
358 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
359 void xfrm_if_unregister_cb(void);
360
361 struct xfrm_dst_lookup_params {
362 struct net *net;
363 dscp_t dscp;
364 int oif;
365 xfrm_address_t *saddr;
366 xfrm_address_t *daddr;
367 u32 mark;
368 __u8 ipproto;
369 union flowi_uli uli;
370 };
371
372 struct net_device;
373 struct xfrm_type;
374 struct xfrm_dst;
375 struct xfrm_policy_afinfo {
376 struct dst_ops *dst_ops;
377 struct dst_entry *(*dst_lookup)(const struct xfrm_dst_lookup_params *params);
378 int (*get_saddr)(xfrm_address_t *saddr,
379 const struct xfrm_dst_lookup_params *params);
380 int (*fill_dst)(struct xfrm_dst *xdst,
381 struct net_device *dev,
382 const struct flowi *fl);
383 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
384 };
385
386 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family);
387 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo);
388 void km_policy_notify(struct xfrm_policy *xp, int dir,
389 const struct km_event *c);
390 void km_state_notify(struct xfrm_state *x, const struct km_event *c);
391
392 struct xfrm_tmpl;
393 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t,
394 struct xfrm_policy *pol);
395 void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
396 int __xfrm_state_delete(struct xfrm_state *x);
397
398 struct xfrm_state_afinfo {
399 u8 family;
400 u8 proto;
401
402 const struct xfrm_type_offload *type_offload_esp;
403
404 const struct xfrm_type *type_esp;
405 const struct xfrm_type *type_ipip;
406 const struct xfrm_type *type_ipip6;
407 const struct xfrm_type *type_comp;
408 const struct xfrm_type *type_ah;
409 const struct xfrm_type *type_routing;
410 const struct xfrm_type *type_dstopts;
411
412 int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
413 int (*transport_finish)(struct sk_buff *skb,
414 int async);
415 void (*local_error)(struct sk_buff *skb, u32 mtu);
416 };
417
418 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
419 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
420 struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
421 struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family);
422
423 struct xfrm_input_afinfo {
424 u8 family;
425 bool is_ipip;
426 int (*callback)(struct sk_buff *skb, u8 protocol,
427 int err);
428 };
429
430 int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo);
431 int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo);
432
433 void xfrm_flush_gc(void);
434 void xfrm_state_delete_tunnel(struct xfrm_state *x);
435
436 struct xfrm_type {
437 struct module *owner;
438 u8 proto;
439 u8 flags;
440 #define XFRM_TYPE_NON_FRAGMENT 1
441 #define XFRM_TYPE_REPLAY_PROT 2
442 #define XFRM_TYPE_LOCAL_COADDR 4
443 #define XFRM_TYPE_REMOTE_COADDR 8
444
445 int (*init_state)(struct xfrm_state *x,
446 struct netlink_ext_ack *extack);
447 void (*destructor)(struct xfrm_state *);
448 int (*input)(struct xfrm_state *, struct sk_buff *skb);
449 int (*output)(struct xfrm_state *, struct sk_buff *pskb);
450 int (*reject)(struct xfrm_state *, struct sk_buff *,
451 const struct flowi *);
452 };
453
454 int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
455 void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
456
457 struct xfrm_type_offload {
458 struct module *owner;
459 u8 proto;
460 void (*encap)(struct xfrm_state *, struct sk_buff *pskb);
461 int (*input_tail)(struct xfrm_state *x, struct sk_buff *skb);
462 int (*xmit)(struct xfrm_state *, struct sk_buff *pskb, netdev_features_t features);
463 };
464
465 int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family);
466 void xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family);
467
468 /**
469 * struct xfrm_mode_cbs - XFRM mode callbacks
470 * @owner: module owner or NULL
471 * @init_state: Add/init mode specific state in `xfrm_state *x`
472 * @clone_state: Copy mode specific values from `orig` to new state `x`
473 * @destroy_state: Cleanup mode specific state from `xfrm_state *x`
474 * @user_init: Process mode specific netlink attributes from user
475 * @copy_to_user: Add netlink attributes to `attrs` based on state in `x`
476 * @sa_len: Return space required to store mode specific netlink attributes
477 * @get_inner_mtu: Return avail payload space after removing encap overhead
478 * @input: Process received packet from SA using mode
479 * @output: Output given packet using mode
480 * @prepare_output: Add mode specific encapsulation to packet in skb. On return
481 * `transport_header` should point at ESP header, `network_header` should
482  *	point at the outer IP header and `mac_header` should point at the
483 * protocol/nexthdr field of the outer IP.
484 *
485 * One should examine and understand the specific uses of these callbacks in
486 * xfrm for further detail on how and when these functions are called. RTSL.
487 */
488 struct xfrm_mode_cbs {
489 struct module *owner;
490 int (*init_state)(struct xfrm_state *x);
491 int (*clone_state)(struct xfrm_state *x, struct xfrm_state *orig);
492 void (*destroy_state)(struct xfrm_state *x);
493 int (*user_init)(struct net *net, struct xfrm_state *x,
494 struct nlattr **attrs,
495 struct netlink_ext_ack *extack);
496 int (*copy_to_user)(struct xfrm_state *x, struct sk_buff *skb);
497 unsigned int (*sa_len)(const struct xfrm_state *x);
498 u32 (*get_inner_mtu)(struct xfrm_state *x, int outer_mtu);
499 int (*input)(struct xfrm_state *x, struct sk_buff *skb);
500 int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
501 int (*prepare_output)(struct xfrm_state *x, struct sk_buff *skb);
502 };
503
504 int xfrm_register_mode_cbs(u8 mode, const struct xfrm_mode_cbs *mode_cbs);
505 void xfrm_unregister_mode_cbs(u8 mode);
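
/* Illustrative sketch only: a mode implementation would typically define a
 * static const struct xfrm_mode_cbs and register it for its mode value from
 * module init (callback names below are hypothetical placeholders; the mode
 * value is shown for the IP-TFS case purely as an example):
 *
 *	static const struct xfrm_mode_cbs example_mode_cbs = {
 *		.owner		= THIS_MODULE,
 *		.init_state	= example_init_state,
 *		.input		= example_input,
 *		.output		= example_output,
 *	};
 *
 *	int err = xfrm_register_mode_cbs(XFRM_MODE_IPTFS, &example_mode_cbs);
 *	...
 *	xfrm_unregister_mode_cbs(XFRM_MODE_IPTFS);
 */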
506
507 static inline int xfrm_af2proto(unsigned int family)
508 {
509 switch(family) {
510 case AF_INET:
511 return IPPROTO_IPIP;
512 case AF_INET6:
513 return IPPROTO_IPV6;
514 default:
515 return 0;
516 }
517 }
518
519 static inline const struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto)
520 {
521 if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
522 (ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6))
523 return &x->inner_mode;
524 else
525 return &x->inner_mode_iaf;
526 }
527
528 struct xfrm_tmpl {
529 /* id in template is interpreted as:
530 * daddr - destination of tunnel, may be zero for transport mode.
531 * spi - zero to acquire spi. Not zero if spi is static, then
532 * daddr must be fixed too.
533 * proto - AH/ESP/IPCOMP
534 */
535 struct xfrm_id id;
536
537 /* Source address of tunnel. Ignored, if it is not a tunnel. */
538 xfrm_address_t saddr;
539
540 unsigned short encap_family;
541
542 u32 reqid;
543
544 /* Mode: transport, tunnel etc. */
545 u8 mode;
546
547 /* Sharing mode: unique, this session only, this user only etc. */
548 u8 share;
549
550 	/* May skip this transformation if no SA is found */
551 u8 optional;
552
553 /* Skip aalgos/ealgos/calgos checks. */
554 u8 allalgs;
555
556 /* Bit mask of algos allowed for acquisition */
557 u32 aalgos;
558 u32 ealgos;
559 u32 calgos;
560 };
561
562 #define XFRM_MAX_DEPTH 6
563 #define XFRM_MAX_OFFLOAD_DEPTH 1
564
565 struct xfrm_policy_walk_entry {
566 struct list_head all;
567 u8 dead;
568 };
569
570 struct xfrm_policy_walk {
571 struct xfrm_policy_walk_entry walk;
572 u8 type;
573 u32 seq;
574 };
575
576 struct xfrm_policy_queue {
577 struct sk_buff_head hold_queue;
578 struct timer_list hold_timer;
579 unsigned long timeout;
580 };
581
582 /**
583 * struct xfrm_policy - xfrm policy
584 * @xp_net: network namespace the policy lives in
585 * @bydst: hlist node for SPD hash table or rbtree list
586 * @byidx: hlist node for index hash table
587 * @state_cache_list: hlist head for policy cached xfrm states
588 * @lock: serialize changes to policy structure members
589 * @refcnt: reference count, freed once it reaches 0
590 * @pos: kernel internal tie-breaker to determine age of policy
591 * @timer: timer
592 * @genid: generation, used to invalidate old policies
593 * @priority: priority, set by userspace
594 * @index: policy index (autogenerated)
595 * @if_id: virtual xfrm interface id
596 * @mark: packet mark
597 * @selector: selector
598  * @lft: lifetime configuration data
599  * @curlft: lifetime state
600  * @walk: list head on pernet policy list
601  * @polq: queue to hold packets while an acquire operation is in progress
602 * @bydst_reinsert: policy tree node needs to be merged
603 * @type: XFRM_POLICY_TYPE_MAIN or _SUB
604 * @action: XFRM_POLICY_ALLOW or _BLOCK
605 * @flags: XFRM_POLICY_LOCALOK, XFRM_POLICY_ICMP
606 * @xfrm_nr: number of used templates in @xfrm_vec
607 * @family: protocol family
608 * @security: SELinux security label
609 * @xfrm_vec: array of templates to resolve state
610 * @rcu: rcu head, used to defer memory release
611 * @xdo: hardware offload state
612 */
613 struct xfrm_policy {
614 possible_net_t xp_net;
615 struct hlist_node bydst;
616 struct hlist_node byidx;
617
618 struct hlist_head state_cache_list;
619
620 /* This lock only affects elements except for entry. */
621 rwlock_t lock;
622 refcount_t refcnt;
623 u32 pos;
624 struct timer_list timer;
625
626 atomic_t genid;
627 u32 priority;
628 u32 index;
629 u32 if_id;
630 struct xfrm_mark mark;
631 struct xfrm_selector selector;
632 struct xfrm_lifetime_cfg lft;
633 struct xfrm_lifetime_cur curlft;
634 struct xfrm_policy_walk_entry walk;
635 struct xfrm_policy_queue polq;
636 bool bydst_reinsert;
637 u8 type;
638 u8 action;
639 u8 flags;
640 u8 xfrm_nr;
641 u16 family;
642 struct xfrm_sec_ctx *security;
643 struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
644 struct rcu_head rcu;
645
646 struct xfrm_dev_offload xdo;
647 };
648
649 static inline struct net *xp_net(const struct xfrm_policy *xp)
650 {
651 return read_pnet(&xp->xp_net);
652 }
653
654 struct xfrm_kmaddress {
655 xfrm_address_t local;
656 xfrm_address_t remote;
657 u32 reserved;
658 u16 family;
659 };
660
661 struct xfrm_migrate {
662 xfrm_address_t old_daddr;
663 xfrm_address_t old_saddr;
664 xfrm_address_t new_daddr;
665 xfrm_address_t new_saddr;
666 u8 proto;
667 u8 mode;
668 u16 reserved;
669 u32 reqid;
670 u16 old_family;
671 u16 new_family;
672 };
673
674 #define XFRM_KM_TIMEOUT 30
675 /* what happened */
676 #define XFRM_REPLAY_UPDATE XFRM_AE_CR
677 #define XFRM_REPLAY_TIMEOUT XFRM_AE_CE
678
679 /* default aevent timeout in units of 100ms */
680 #define XFRM_AE_ETIME 10
681 /* Async Event timer multiplier */
682 #define XFRM_AE_ETH_M 10
683 /* default seq threshold size */
684 #define XFRM_AE_SEQT_SIZE 2
685
686 struct xfrm_mgr {
687 struct list_head list;
688 int (*notify)(struct xfrm_state *x, const struct km_event *c);
689 int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp);
690 struct xfrm_policy *(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
691 int (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
692 int (*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c);
693 int (*report)(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
694 int (*migrate)(const struct xfrm_selector *sel,
695 u8 dir, u8 type,
696 const struct xfrm_migrate *m,
697 int num_bundles,
698 const struct xfrm_kmaddress *k,
699 const struct xfrm_encap_tmpl *encap);
700 bool (*is_alive)(const struct km_event *c);
701 };
702
703 void xfrm_register_km(struct xfrm_mgr *km);
704 void xfrm_unregister_km(struct xfrm_mgr *km);
705
706 struct xfrm_tunnel_skb_cb {
707 union {
708 struct inet_skb_parm h4;
709 struct inet6_skb_parm h6;
710 } header;
711
712 union {
713 struct ip_tunnel *ip4;
714 struct ip6_tnl *ip6;
715 } tunnel;
716 };
717
718 #define XFRM_TUNNEL_SKB_CB(__skb) ((struct xfrm_tunnel_skb_cb *)&((__skb)->cb[0]))
719
720 /*
721 * This structure is used for the duration where packets are being
722 * transformed by IPsec. As soon as the packet leaves IPsec the
723 * area beyond the generic IP part may be overwritten.
724 */
725 struct xfrm_skb_cb {
726 struct xfrm_tunnel_skb_cb header;
727
728 /* Sequence number for replay protection. */
729 union {
730 struct {
731 __u32 low;
732 __u32 hi;
733 } output;
734 struct {
735 __be32 low;
736 __be32 hi;
737 } input;
738 } seq;
739 };
740
741 #define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0]))
742
743 /*
744 * This structure is used by the afinfo prepare_input/prepare_output functions
745 * to transmit header information to the mode input/output functions.
746 */
747 struct xfrm_mode_skb_cb {
748 struct xfrm_tunnel_skb_cb header;
749
750 /* Copied from header for IPv4, always set to zero and DF for IPv6. */
751 __be16 id;
752 __be16 frag_off;
753
754 /* IP header length (excluding options or extension headers). */
755 u8 ihl;
756
757 /* TOS for IPv4, class for IPv6. */
758 u8 tos;
759
760 	/* TTL for IPv4, hop limit for IPv6. */
761 u8 ttl;
762
763 /* Protocol for IPv4, NH for IPv6. */
764 u8 protocol;
765
766 /* Option length for IPv4, zero for IPv6. */
767 u8 optlen;
768
769 /* Used by IPv6 only, zero for IPv4. */
770 u8 flow_lbl[3];
771 };
772
773 #define XFRM_MODE_SKB_CB(__skb) ((struct xfrm_mode_skb_cb *)&((__skb)->cb[0]))
774
775 /*
776 * This structure is used by the input processing to locate the SPI and
777 * related information.
778 */
779 struct xfrm_spi_skb_cb {
780 struct xfrm_tunnel_skb_cb header;
781
782 unsigned int daddroff;
783 unsigned int family;
784 __be32 seq;
785 };
786
787 #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
788
789 #ifdef CONFIG_AUDITSYSCALL
790 static inline struct audit_buffer *xfrm_audit_start(const char *op)
791 {
792 struct audit_buffer *audit_buf = NULL;
793
794 if (audit_enabled == AUDIT_OFF)
795 return NULL;
796 audit_buf = audit_log_start(audit_context(), GFP_ATOMIC,
797 AUDIT_MAC_IPSEC_EVENT);
798 if (audit_buf == NULL)
799 return NULL;
800 audit_log_format(audit_buf, "op=%s", op);
801 return audit_buf;
802 }
803
804 static inline void xfrm_audit_helper_usrinfo(bool task_valid,
805 struct audit_buffer *audit_buf)
806 {
807 const unsigned int auid = from_kuid(&init_user_ns, task_valid ?
808 audit_get_loginuid(current) :
809 INVALID_UID);
810 const unsigned int ses = task_valid ? audit_get_sessionid(current) :
811 AUDIT_SID_UNSET;
812
813 audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses);
814 audit_log_task_context(audit_buf);
815 }
816
817 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid);
818 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
819 bool task_valid);
820 void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid);
821 void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid);
822 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
823 struct sk_buff *skb);
824 void xfrm_audit_state_replay(struct xfrm_state *x, struct sk_buff *skb,
825 __be32 net_seq);
826 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
827 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, __be32 net_spi,
828 __be32 net_seq);
829 void xfrm_audit_state_icvfail(struct xfrm_state *x, struct sk_buff *skb,
830 u8 proto);
831 #else
832
833 static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
834 bool task_valid)
835 {
836 }
837
838 static inline void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
839 bool task_valid)
840 {
841 }
842
843 static inline void xfrm_audit_state_add(struct xfrm_state *x, int result,
844 bool task_valid)
845 {
846 }
847
848 static inline void xfrm_audit_state_delete(struct xfrm_state *x, int result,
849 bool task_valid)
850 {
851 }
852
853 static inline void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
854 struct sk_buff *skb)
855 {
856 }
857
858 static inline void xfrm_audit_state_replay(struct xfrm_state *x,
859 struct sk_buff *skb, __be32 net_seq)
860 {
861 }
862
863 static inline void xfrm_audit_state_notfound_simple(struct sk_buff *skb,
864 u16 family)
865 {
866 }
867
868 static inline void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
869 __be32 net_spi, __be32 net_seq)
870 {
871 }
872
873 static inline void xfrm_audit_state_icvfail(struct xfrm_state *x,
874 struct sk_buff *skb, u8 proto)
875 {
876 }
877 #endif /* CONFIG_AUDITSYSCALL */
878
879 static inline void xfrm_pol_hold(struct xfrm_policy *policy)
880 {
881 if (likely(policy != NULL))
882 refcount_inc(&policy->refcnt);
883 }
884
885 void xfrm_policy_destroy(struct xfrm_policy *policy);
886
887 static inline void xfrm_pol_put(struct xfrm_policy *policy)
888 {
889 if (refcount_dec_and_test(&policy->refcnt))
890 xfrm_policy_destroy(policy);
891 }
892
893 static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
894 {
895 int i;
896 for (i = npols - 1; i >= 0; --i)
897 xfrm_pol_put(pols[i]);
898 }
899
900 void __xfrm_state_destroy(struct xfrm_state *, bool);
901
902 static inline void __xfrm_state_put(struct xfrm_state *x)
903 {
904 refcount_dec(&x->refcnt);
905 }
906
907 static inline void xfrm_state_put(struct xfrm_state *x)
908 {
909 if (refcount_dec_and_test(&x->refcnt))
910 __xfrm_state_destroy(x, false);
911 }
912
913 static inline void xfrm_state_put_sync(struct xfrm_state *x)
914 {
915 if (refcount_dec_and_test(&x->refcnt))
916 __xfrm_state_destroy(x, true);
917 }
918
919 static inline void xfrm_state_hold(struct xfrm_state *x)
920 {
921 refcount_inc(&x->refcnt);
922 }
923
924 static inline bool addr_match(const void *token1, const void *token2,
925 unsigned int prefixlen)
926 {
927 const __be32 *a1 = token1;
928 const __be32 *a2 = token2;
929 unsigned int pdw;
930 unsigned int pbi;
931
932 pdw = prefixlen >> 5; /* num of whole u32 in prefix */
933 pbi = prefixlen & 0x1f; /* num of bits in incomplete u32 in prefix */
934
935 if (pdw)
936 if (memcmp(a1, a2, pdw << 2))
937 return false;
938
939 if (pbi) {
940 __be32 mask;
941
942 mask = htonl((0xffffffff) << (32 - pbi));
943
944 if ((a1[pdw] ^ a2[pdw]) & mask)
945 return false;
946 }
947
948 return true;
949 }
950
951 static inline bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen)
952 {
953 /* C99 6.5.7 (3): u32 << 32 is undefined behaviour */
954 if (sizeof(long) == 4 && prefixlen == 0)
955 return true;
956 return !((a1 ^ a2) & htonl(~0UL << (32 - prefixlen)));
957 }
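
/* Worked example: addr4_match(a1, a2, 24) compares only the top 24 bits, so
 * two addresses within the same /24 network (e.g. 192.0.2.1 and 192.0.2.7)
 * match, while prefixlen 32 requires full equality and prefixlen 0 matches
 * everything.
 */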
958
959 static __inline__
960 __be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli)
961 {
962 __be16 port;
963 switch(fl->flowi_proto) {
964 case IPPROTO_TCP:
965 case IPPROTO_UDP:
966 case IPPROTO_UDPLITE:
967 case IPPROTO_SCTP:
968 port = uli->ports.sport;
969 break;
970 case IPPROTO_ICMP:
971 case IPPROTO_ICMPV6:
972 port = htons(uli->icmpt.type);
973 break;
974 case IPPROTO_MH:
975 port = htons(uli->mht.type);
976 break;
977 case IPPROTO_GRE:
978 port = htons(ntohl(uli->gre_key) >> 16);
979 break;
980 default:
981 port = 0; /*XXX*/
982 }
983 return port;
984 }
985
986 static __inline__
987 __be16 xfrm_flowi_dport(const struct flowi *fl, const union flowi_uli *uli)
988 {
989 __be16 port;
990 switch(fl->flowi_proto) {
991 case IPPROTO_TCP:
992 case IPPROTO_UDP:
993 case IPPROTO_UDPLITE:
994 case IPPROTO_SCTP:
995 port = uli->ports.dport;
996 break;
997 case IPPROTO_ICMP:
998 case IPPROTO_ICMPV6:
999 port = htons(uli->icmpt.code);
1000 break;
1001 case IPPROTO_GRE:
1002 port = htons(ntohl(uli->gre_key) & 0xffff);
1003 break;
1004 default:
1005 port = 0; /*XXX*/
1006 }
1007 return port;
1008 }
1009
1010 bool xfrm_selector_match(const struct xfrm_selector *sel,
1011 const struct flowi *fl, unsigned short family);
1012
1013 #ifdef CONFIG_SECURITY_NETWORK_XFRM
1014 /* If neither has a context --> match
1015 * Otherwise, both must have a context and the sids, doi, alg must match
1016 */
1017 static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
1018 {
1019 return ((!s1 && !s2) ||
1020 (s1 && s2 &&
1021 (s1->ctx_sid == s2->ctx_sid) &&
1022 (s1->ctx_doi == s2->ctx_doi) &&
1023 (s1->ctx_alg == s2->ctx_alg)));
1024 }
1025 #else
1026 static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
1027 {
1028 return true;
1029 }
1030 #endif
1031
1032 /* A struct encoding a bundle of transformations to apply to some set of flows.
1033 *
1034 * xdst->child points to the next element of bundle.
1035  * dst->xfrm points to an instance of a transformer.
1036 *
1037 * Due to unfortunate limitations of current routing cache, which we
1038  * have no time to fix, it mirrors struct rtable and is bound to the same
1039  * routing key, including saddr,daddr. However, we can have many
1040 * bundles differing by session id. All the bundles grow from a parent
1041 * policy rule.
1042 */
1043 struct xfrm_dst {
1044 union {
1045 struct dst_entry dst;
1046 struct rtable rt;
1047 struct rt6_info rt6;
1048 } u;
1049 struct dst_entry *route;
1050 struct dst_entry *child;
1051 struct dst_entry *path;
1052 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
1053 int num_pols, num_xfrms;
1054 u32 xfrm_genid;
1055 u32 policy_genid;
1056 u32 route_mtu_cached;
1057 u32 child_mtu_cached;
1058 u32 route_cookie;
1059 u32 path_cookie;
1060 };
1061
1062 static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
1063 {
1064 #ifdef CONFIG_XFRM
1065 if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
1066 const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst;
1067
1068 return xdst->path;
1069 }
1070 #endif
1071 return (struct dst_entry *) dst;
1072 }
1073
1074 static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
1075 {
1076 #ifdef CONFIG_XFRM
1077 if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
1078 struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
1079 return xdst->child;
1080 }
1081 #endif
1082 return NULL;
1083 }
1084
1085 #ifdef CONFIG_XFRM
1086 static inline void xfrm_dst_set_child(struct xfrm_dst *xdst, struct dst_entry *child)
1087 {
1088 xdst->child = child;
1089 }
1090
1091 static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
1092 {
1093 xfrm_pols_put(xdst->pols, xdst->num_pols);
1094 dst_release(xdst->route);
1095 if (likely(xdst->u.dst.xfrm))
1096 xfrm_state_put(xdst->u.dst.xfrm);
1097 }
1098 #endif
1099
1100 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
1101
1102 struct xfrm_if_parms {
1103 int link; /* ifindex of underlying L2 interface */
1104 u32 if_id; /* interface identifier */
1105 bool collect_md;
1106 };
1107
1108 struct xfrm_if {
1109 struct xfrm_if __rcu *next; /* next interface in list */
1110 struct net_device *dev; /* virtual device associated with interface */
1111 struct net *net; /* netns for packet i/o */
1112 struct xfrm_if_parms p; /* interface parms */
1113
1114 struct gro_cells gro_cells;
1115 };
1116
1117 struct xfrm_offload {
1118 /* Output sequence number for replay protection on offloading. */
1119 struct {
1120 __u32 low;
1121 __u32 hi;
1122 } seq;
1123
1124 __u32 flags;
1125 #define SA_DELETE_REQ 1
1126 #define CRYPTO_DONE 2
1127 #define CRYPTO_NEXT_DONE 4
1128 #define CRYPTO_FALLBACK 8
1129 #define XFRM_GSO_SEGMENT 16
1130 #define XFRM_GRO 32
1131 /* 64 is free */
1132 #define XFRM_DEV_RESUME 128
1133 #define XFRM_XMIT 256
1134
1135 __u32 status;
1136 #define CRYPTO_SUCCESS 1
1137 #define CRYPTO_GENERIC_ERROR 2
1138 #define CRYPTO_TRANSPORT_AH_AUTH_FAILED 4
1139 #define CRYPTO_TRANSPORT_ESP_AUTH_FAILED 8
1140 #define CRYPTO_TUNNEL_AH_AUTH_FAILED 16
1141 #define CRYPTO_TUNNEL_ESP_AUTH_FAILED 32
1142 #define CRYPTO_INVALID_PACKET_SYNTAX 64
1143 #define CRYPTO_INVALID_PROTOCOL 128
1144
1145 /* Used to keep whole l2 header for transport mode GRO */
1146 __u32 orig_mac_len;
1147
1148 __u8 proto;
1149 __u8 inner_ipproto;
1150 };
1151
1152 struct sec_path {
1153 int len;
1154 int olen;
1155 int verified_cnt;
1156
1157 struct xfrm_state *xvec[XFRM_MAX_DEPTH];
1158 struct xfrm_offload ovec[XFRM_MAX_OFFLOAD_DEPTH];
1159 };
1160
1161 struct sec_path *secpath_set(struct sk_buff *skb);
1162
1163 static inline void
1164 secpath_reset(struct sk_buff *skb)
1165 {
1166 #ifdef CONFIG_XFRM
1167 skb_ext_del(skb, SKB_EXT_SEC_PATH);
1168 #endif
1169 }
1170
1171 static inline int
1172 xfrm_addr_any(const xfrm_address_t *addr, unsigned short family)
1173 {
1174 switch (family) {
1175 case AF_INET:
1176 return addr->a4 == 0;
1177 case AF_INET6:
1178 return ipv6_addr_any(&addr->in6);
1179 }
1180 return 0;
1181 }
1182
1183 static inline int
1184 __xfrm4_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
1185 {
1186 return (tmpl->saddr.a4 &&
1187 tmpl->saddr.a4 != x->props.saddr.a4);
1188 }
1189
1190 static inline int
1191 __xfrm6_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
1192 {
1193 return (!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
1194 !ipv6_addr_equal((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
1195 }
1196
1197 static inline int
1198 xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, unsigned short family)
1199 {
1200 switch (family) {
1201 case AF_INET:
1202 return __xfrm4_state_addr_cmp(tmpl, x);
1203 case AF_INET6:
1204 return __xfrm6_state_addr_cmp(tmpl, x);
1205 }
1206 return !0;
1207 }
1208
1209 #ifdef CONFIG_XFRM
1210 static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
1211 {
1212 struct sec_path *sp = skb_sec_path(skb);
1213
1214 return sp->xvec[sp->len - 1];
1215 }
1216 #endif
1217
1218 static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
1219 {
1220 #ifdef CONFIG_XFRM
1221 struct sec_path *sp = skb_sec_path(skb);
1222
1223 if (!sp || !sp->olen || sp->len != sp->olen)
1224 return NULL;
1225
1226 return &sp->ovec[sp->olen - 1];
1227 #else
1228 return NULL;
1229 #endif
1230 }
1231
1232 #ifdef CONFIG_XFRM
1233 int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
1234 unsigned short family);
1235
1236 static inline bool __xfrm_check_nopolicy(struct net *net, struct sk_buff *skb,
1237 int dir)
1238 {
1239 if (!net->xfrm.policy_count[dir] && !secpath_exists(skb))
1240 return net->xfrm.policy_default[dir] == XFRM_USERPOLICY_ACCEPT;
1241
1242 return false;
1243 }
1244
1245 static inline bool __xfrm_check_dev_nopolicy(struct sk_buff *skb,
1246 int dir, unsigned short family)
1247 {
1248 if (dir != XFRM_POLICY_OUT && family == AF_INET) {
1249 /* same dst may be used for traffic originating from
1250 * devices with different policy settings.
1251 */
1252 return IPCB(skb)->flags & IPSKB_NOPOLICY;
1253 }
1254 return skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY);
1255 }
1256
1257 static inline int __xfrm_policy_check2(struct sock *sk, int dir,
1258 struct sk_buff *skb,
1259 unsigned int family, int reverse)
1260 {
1261 struct net *net = dev_net(skb->dev);
1262 int ndir = dir | (reverse ? XFRM_POLICY_MASK + 1 : 0);
1263 struct xfrm_offload *xo = xfrm_offload(skb);
1264 struct xfrm_state *x;
1265
1266 if (sk && sk->sk_policy[XFRM_POLICY_IN])
1267 return __xfrm_policy_check(sk, ndir, skb, family);
1268
1269 if (xo) {
1270 x = xfrm_input_state(skb);
1271 if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
1272 bool check = (xo->flags & CRYPTO_DONE) &&
1273 (xo->status & CRYPTO_SUCCESS);
1274
1275 /* The packets here are plain ones and secpath was
1276 * needed to indicate that hardware already handled
1277 			 * them and there is no need to do anything in addition.
1278 *
1279 * Consume secpath which was set by drivers.
1280 */
1281 secpath_reset(skb);
1282 return check;
1283 }
1284 }
1285
1286 return __xfrm_check_nopolicy(net, skb, dir) ||
1287 __xfrm_check_dev_nopolicy(skb, dir, family) ||
1288 __xfrm_policy_check(sk, ndir, skb, family);
1289 }
1290
1291 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1292 {
1293 return __xfrm_policy_check2(sk, dir, skb, family, 0);
1294 }
1295
1296 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1297 {
1298 return xfrm_policy_check(sk, dir, skb, AF_INET);
1299 }
1300
1301 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1302 {
1303 return xfrm_policy_check(sk, dir, skb, AF_INET6);
1304 }
1305
1306 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1307 struct sk_buff *skb)
1308 {
1309 return __xfrm_policy_check2(sk, dir, skb, AF_INET, 1);
1310 }
1311
1312 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1313 struct sk_buff *skb)
1314 {
1315 return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
1316 }
1317
1318 int __xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl,
1319 unsigned int family, int reverse);
1320
1321 static inline int xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl,
1322 unsigned int family)
1323 {
1324 return __xfrm_decode_session(net, skb, fl, family, 0);
1325 }
1326
1327 static inline int xfrm_decode_session_reverse(struct net *net, struct sk_buff *skb,
1328 struct flowi *fl,
1329 unsigned int family)
1330 {
1331 return __xfrm_decode_session(net, skb, fl, family, 1);
1332 }
1333
1334 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
1335
1336 static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
1337 {
1338 struct net *net = dev_net(skb->dev);
1339
1340 if (!net->xfrm.policy_count[XFRM_POLICY_OUT] &&
1341 net->xfrm.policy_default[XFRM_POLICY_OUT] == XFRM_USERPOLICY_ACCEPT)
1342 return true;
1343
1344 return (skb_dst(skb)->flags & DST_NOXFRM) ||
1345 __xfrm_route_forward(skb, family);
1346 }
1347
1348 static inline int xfrm4_route_forward(struct sk_buff *skb)
1349 {
1350 return xfrm_route_forward(skb, AF_INET);
1351 }
1352
1353 static inline int xfrm6_route_forward(struct sk_buff *skb)
1354 {
1355 return xfrm_route_forward(skb, AF_INET6);
1356 }
1357
1358 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk);
1359
1360 static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
1361 {
1362 if (!sk_fullsock(osk))
1363 return 0;
1364 sk->sk_policy[0] = NULL;
1365 sk->sk_policy[1] = NULL;
1366 if (unlikely(osk->sk_policy[0] || osk->sk_policy[1]))
1367 return __xfrm_sk_clone_policy(sk, osk);
1368 return 0;
1369 }
1370
1371 int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
1372
1373 static inline void xfrm_sk_free_policy(struct sock *sk)
1374 {
1375 struct xfrm_policy *pol;
1376
1377 pol = rcu_dereference_protected(sk->sk_policy[0], 1);
1378 if (unlikely(pol != NULL)) {
1379 xfrm_policy_delete(pol, XFRM_POLICY_MAX);
1380 sk->sk_policy[0] = NULL;
1381 }
1382 pol = rcu_dereference_protected(sk->sk_policy[1], 1);
1383 if (unlikely(pol != NULL)) {
1384 xfrm_policy_delete(pol, XFRM_POLICY_MAX+1);
1385 sk->sk_policy[1] = NULL;
1386 }
1387 }
1388
1389 #else
1390
1391 static inline void xfrm_sk_free_policy(struct sock *sk) {}
1392 static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) { return 0; }
1393 static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
1394 static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
1395 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1396 {
1397 return 1;
1398 }
1399 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1400 {
1401 return 1;
1402 }
1403 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1404 {
1405 return 1;
1406 }
1407 static inline int xfrm_decode_session_reverse(struct net *net, struct sk_buff *skb,
1408 struct flowi *fl,
1409 unsigned int family)
1410 {
1411 return -ENOSYS;
1412 }
1413 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1414 struct sk_buff *skb)
1415 {
1416 return 1;
1417 }
1418 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1419 struct sk_buff *skb)
1420 {
1421 return 1;
1422 }
1423 #endif
1424
1425 static __inline__
1426 xfrm_address_t *xfrm_flowi_daddr(const struct flowi *fl, unsigned short family)
1427 {
1428 switch (family){
1429 case AF_INET:
1430 return (xfrm_address_t *)&fl->u.ip4.daddr;
1431 case AF_INET6:
1432 return (xfrm_address_t *)&fl->u.ip6.daddr;
1433 }
1434 return NULL;
1435 }
1436
1437 static __inline__
1438 xfrm_address_t *xfrm_flowi_saddr(const struct flowi *fl, unsigned short family)
1439 {
1440 switch (family){
1441 case AF_INET:
1442 return (xfrm_address_t *)&fl->u.ip4.saddr;
1443 case AF_INET6:
1444 return (xfrm_address_t *)&fl->u.ip6.saddr;
1445 }
1446 return NULL;
1447 }
1448
1449 static __inline__
1450 void xfrm_flowi_addr_get(const struct flowi *fl,
1451 xfrm_address_t *saddr, xfrm_address_t *daddr,
1452 unsigned short family)
1453 {
1454 switch(family) {
1455 case AF_INET:
1456 memcpy(&saddr->a4, &fl->u.ip4.saddr, sizeof(saddr->a4));
1457 memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
1458 break;
1459 case AF_INET6:
1460 saddr->in6 = fl->u.ip6.saddr;
1461 daddr->in6 = fl->u.ip6.daddr;
1462 break;
1463 }
1464 }
1465
1466 static __inline__ int
1467 __xfrm4_state_addr_check(const struct xfrm_state *x,
1468 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
1469 {
1470 if (daddr->a4 == x->id.daddr.a4 &&
1471 (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
1472 return 1;
1473 return 0;
1474 }
1475
1476 static __inline__ int
1477 __xfrm6_state_addr_check(const struct xfrm_state *x,
1478 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
1479 {
1480 if (ipv6_addr_equal((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
1481 (ipv6_addr_equal((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr) ||
1482 ipv6_addr_any((struct in6_addr *)saddr) ||
1483 ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
1484 return 1;
1485 return 0;
1486 }
1487
1488 static __inline__ int
1489 xfrm_state_addr_check(const struct xfrm_state *x,
1490 const xfrm_address_t *daddr, const xfrm_address_t *saddr,
1491 unsigned short family)
1492 {
1493 switch (family) {
1494 case AF_INET:
1495 return __xfrm4_state_addr_check(x, daddr, saddr);
1496 case AF_INET6:
1497 return __xfrm6_state_addr_check(x, daddr, saddr);
1498 }
1499 return 0;
1500 }
1501
1502 static __inline__ int
1503 xfrm_state_addr_flow_check(const struct xfrm_state *x, const struct flowi *fl,
1504 unsigned short family)
1505 {
1506 switch (family) {
1507 case AF_INET:
1508 return __xfrm4_state_addr_check(x,
1509 (const xfrm_address_t *)&fl->u.ip4.daddr,
1510 (const xfrm_address_t *)&fl->u.ip4.saddr);
1511 case AF_INET6:
1512 return __xfrm6_state_addr_check(x,
1513 (const xfrm_address_t *)&fl->u.ip6.daddr,
1514 (const xfrm_address_t *)&fl->u.ip6.saddr);
1515 }
1516 return 0;
1517 }
1518
1519 static inline int xfrm_state_kern(const struct xfrm_state *x)
1520 {
1521 return atomic_read(&x->tunnel_users);
1522 }
1523
1524 static inline bool xfrm_id_proto_valid(u8 proto)
1525 {
1526 switch (proto) {
1527 case IPPROTO_AH:
1528 case IPPROTO_ESP:
1529 case IPPROTO_COMP:
1530 #if IS_ENABLED(CONFIG_IPV6)
1531 case IPPROTO_ROUTING:
1532 case IPPROTO_DSTOPTS:
1533 #endif
1534 return true;
1535 default:
1536 return false;
1537 }
1538 }
1539
1540 /* IPSEC_PROTO_ANY only matches 3 IPsec protocols, 0 could match all. */
1541 static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
1542 {
1543 return (!userproto || proto == userproto ||
1544 (userproto == IPSEC_PROTO_ANY && (proto == IPPROTO_AH ||
1545 proto == IPPROTO_ESP ||
1546 proto == IPPROTO_COMP)));
1547 }
1548
1549 /*
1550 * xfrm algorithm information
1551 */
1552 struct xfrm_algo_aead_info {
1553 char *geniv;
1554 u16 icv_truncbits;
1555 };
1556
1557 struct xfrm_algo_auth_info {
1558 u16 icv_truncbits;
1559 u16 icv_fullbits;
1560 };
1561
1562 struct xfrm_algo_encr_info {
1563 char *geniv;
1564 u16 blockbits;
1565 u16 defkeybits;
1566 };
1567
1568 struct xfrm_algo_comp_info {
1569 u16 threshold;
1570 };
1571
1572 struct xfrm_algo_desc {
1573 char *name;
1574 char *compat;
1575 u8 available:1;
1576 u8 pfkey_supported:1;
1577 union {
1578 struct xfrm_algo_aead_info aead;
1579 struct xfrm_algo_auth_info auth;
1580 struct xfrm_algo_encr_info encr;
1581 struct xfrm_algo_comp_info comp;
1582 } uinfo;
1583 struct sadb_alg desc;
1584 };
1585
1586 /* XFRM protocol handlers. */
1587 struct xfrm4_protocol {
1588 int (*handler)(struct sk_buff *skb);
1589 int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
1590 int encap_type);
1591 int (*cb_handler)(struct sk_buff *skb, int err);
1592 int (*err_handler)(struct sk_buff *skb, u32 info);
1593
1594 struct xfrm4_protocol __rcu *next;
1595 int priority;
1596 };
1597
1598 struct xfrm6_protocol {
1599 int (*handler)(struct sk_buff *skb);
1600 int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
1601 int encap_type);
1602 int (*cb_handler)(struct sk_buff *skb, int err);
1603 int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
1604 u8 type, u8 code, int offset, __be32 info);
1605
1606 struct xfrm6_protocol __rcu *next;
1607 int priority;
1608 };
1609
1610 /* XFRM tunnel handlers. */
1611 struct xfrm_tunnel {
1612 int (*handler)(struct sk_buff *skb);
1613 int (*cb_handler)(struct sk_buff *skb, int err);
1614 int (*err_handler)(struct sk_buff *skb, u32 info);
1615
1616 struct xfrm_tunnel __rcu *next;
1617 int priority;
1618 };
1619
1620 struct xfrm6_tunnel {
1621 int (*handler)(struct sk_buff *skb);
1622 int (*cb_handler)(struct sk_buff *skb, int err);
1623 int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
1624 u8 type, u8 code, int offset, __be32 info);
1625 struct xfrm6_tunnel __rcu *next;
1626 int priority;
1627 };
1628
1629 void xfrm_init(void);
1630 void xfrm4_init(void);
1631 int xfrm_state_init(struct net *net);
1632 void xfrm_state_fini(struct net *net);
1633 void xfrm4_state_init(void);
1634 void xfrm4_protocol_init(void);
1635 #ifdef CONFIG_XFRM
1636 int xfrm6_init(void);
1637 void xfrm6_fini(void);
1638 int xfrm6_state_init(void);
1639 void xfrm6_state_fini(void);
1640 int xfrm6_protocol_init(void);
1641 void xfrm6_protocol_fini(void);
1642 #else
1643 static inline int xfrm6_init(void)
1644 {
1645 return 0;
1646 }
1647 static inline void xfrm6_fini(void)
1648 {
1649 ;
1650 }
1651 #endif
1652
1653 #ifdef CONFIG_XFRM_STATISTICS
1654 int xfrm_proc_init(struct net *net);
1655 void xfrm_proc_fini(struct net *net);
1656 #endif
1657
1658 int xfrm_sysctl_init(struct net *net);
1659 #ifdef CONFIG_SYSCTL
1660 void xfrm_sysctl_fini(struct net *net);
1661 #else
1662 static inline void xfrm_sysctl_fini(struct net *net)
1663 {
1664 }
1665 #endif
1666
1667 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
1668 struct xfrm_address_filter *filter);
1669 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
1670 int (*func)(struct xfrm_state *, int, void*), void *);
1671 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
1672 struct xfrm_state *xfrm_state_alloc(struct net *net);
1673 void xfrm_state_free(struct xfrm_state *x);
1674 struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
1675 const xfrm_address_t *saddr,
1676 const struct flowi *fl,
1677 struct xfrm_tmpl *tmpl,
1678 struct xfrm_policy *pol, int *err,
1679 unsigned short family, u32 if_id);
1680 struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
1681 xfrm_address_t *daddr,
1682 xfrm_address_t *saddr,
1683 unsigned short family,
1684 u8 mode, u8 proto, u32 reqid);
1685 struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
1686 unsigned short family);
1687 int xfrm_state_check_expire(struct xfrm_state *x);
1688 void xfrm_state_update_stats(struct net *net);
1689 #ifdef CONFIG_XFRM_OFFLOAD
1690 static inline void xfrm_dev_state_update_stats(struct xfrm_state *x)
1691 {
1692 struct xfrm_dev_offload *xdo = &x->xso;
1693 struct net_device *dev = READ_ONCE(xdo->dev);
1694
1695 if (dev && dev->xfrmdev_ops &&
1696 dev->xfrmdev_ops->xdo_dev_state_update_stats)
1697 dev->xfrmdev_ops->xdo_dev_state_update_stats(x);
1698
1699 }
1700 #else
1701 static inline void xfrm_dev_state_update_stats(struct xfrm_state *x) {}
1702 #endif
1703 void xfrm_state_insert(struct xfrm_state *x);
1704 int xfrm_state_add(struct xfrm_state *x);
1705 int xfrm_state_update(struct xfrm_state *x);
1706 struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
1707 const xfrm_address_t *daddr, __be32 spi,
1708 u8 proto, unsigned short family);
1709 struct xfrm_state *xfrm_input_state_lookup(struct net *net, u32 mark,
1710 const xfrm_address_t *daddr,
1711 __be32 spi, u8 proto,
1712 unsigned short family);
1713 struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
1714 const xfrm_address_t *daddr,
1715 const xfrm_address_t *saddr,
1716 u8 proto,
1717 unsigned short family);
1718 #ifdef CONFIG_XFRM_SUB_POLICY
1719 void xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1720 unsigned short family);
1721 void xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1722 unsigned short family);
1723 #else
1724 static inline void xfrm_tmpl_sort(struct xfrm_tmpl **d, struct xfrm_tmpl **s,
1725 int n, unsigned short family)
1726 {
1727 }
1728
1729 static inline void xfrm_state_sort(struct xfrm_state **d, struct xfrm_state **s,
1730 int n, unsigned short family)
1731 {
1732 }
1733 #endif
1734
1735 struct xfrmk_sadinfo {
1736 u32 sadhcnt; /* current hash bkts */
1737 u32 sadhmcnt; /* max allowed hash bkts */
1738 u32 sadcnt; /* current running count */
1739 };
1740
1741 struct xfrmk_spdinfo {
1742 u32 incnt;
1743 u32 outcnt;
1744 u32 fwdcnt;
1745 u32 inscnt;
1746 u32 outscnt;
1747 u32 fwdscnt;
1748 u32 spdhcnt;
1749 u32 spdhmcnt;
1750 };
1751
1752 struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num);
1753 int xfrm_state_delete(struct xfrm_state *x);
1754 int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
1755 int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
1756 int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
1757 bool task_valid);
1758 void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
1759 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
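/* Both getinfo helpers fill a caller-provided struct with the current
 * database counters, e.g.:
 *
 *	struct xfrmk_sadinfo si;
 *	struct xfrmk_spdinfo sp;
 *
 *	xfrm_sad_getinfo(net, &si);
 *	xfrm_spd_getinfo(net, &sp);
 *	pr_debug("SAD: %u entries in %u buckets, SPD: %u input policies\n",
 *		 si.sadcnt, si.sadhcnt, sp.incnt);
 */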
1760 u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
1761 int xfrm_init_replay(struct xfrm_state *x, struct netlink_ext_ack *extack);
1762 u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
1763 int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload,
1764 struct netlink_ext_ack *extack);
1765 int xfrm_init_state(struct xfrm_state *x);
1766 int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
1767 int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
1768 int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
1769 int (*finish)(struct net *, struct sock *,
1770 struct sk_buff *));
1771 int xfrm_trans_queue(struct sk_buff *skb,
1772 int (*finish)(struct net *, struct sock *,
1773 struct sk_buff *));
1774 int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err);
1775 int xfrm_output(struct sock *sk, struct sk_buff *skb);
1776
1777 #if IS_ENABLED(CONFIG_NET_PKTGEN)
1778 int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb);
1779 #endif
1780
1781 void xfrm_local_error(struct sk_buff *skb, int mtu);
1782 int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
1783 int encap_type);
1784 int xfrm4_transport_finish(struct sk_buff *skb, int async);
1785 int xfrm4_rcv(struct sk_buff *skb);
1786
1787 static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
1788 {
1789 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
1790 XFRM_SPI_SKB_CB(skb)->family = AF_INET;
1791 XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
1792 return xfrm_input(skb, nexthdr, spi, 0);
1793 }
1794
1795 int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb);
1796 int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
1797 int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol);
1798 int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
1799 int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
1800 void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
1801 int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
1802 struct ip6_tnl *t);
1803 int xfrm6_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
1804 int encap_type);
1805 int xfrm6_transport_finish(struct sk_buff *skb, int async);
1806 int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t);
1807 int xfrm6_rcv(struct sk_buff *skb);
1808 int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
1809 xfrm_address_t *saddr, u8 proto);
1810 void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
1811 int xfrm6_protocol_register(struct xfrm6_protocol *handler, unsigned char protocol);
1812 int xfrm6_protocol_deregister(struct xfrm6_protocol *handler, unsigned char protocol);
1813 int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
1814 int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
1815 __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
1816 __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
1817 int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
1818
1819 #ifdef CONFIG_XFRM
1820 void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu);
1821 int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
1822 int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
1823 struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
1824 struct sk_buff *skb);
1825 struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
1826 struct sk_buff *skb);
1827 int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval,
1828 int optlen);
1829 #else
1830 static inline int xfrm_user_policy(struct sock *sk, int optname,
1831 sockptr_t optval, int optlen)
1832 {
1833 return -ENOPROTOOPT;
1834 }
1835 #endif
1836
1837 struct dst_entry *__xfrm_dst_lookup(int family, const struct xfrm_dst_lookup_params *params);
1838
1839 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);
1840
1841 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
1842 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1843 int (*func)(struct xfrm_policy *, int, int, void*),
1844 void *);
1845 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
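/* The policy walk mirrors the state walk earlier in this header; a
 * minimal sketch, with the hypothetical callback dump_one_pol() and the
 * ordinary policy type XFRM_POLICY_TYPE_MAIN from <linux/xfrm.h>:
 *
 *	static int dump_one_pol(struct xfrm_policy *pol, int dir, int count,
 *				void *arg)
 *	{
 *		...
 *		return 0;			(non-zero aborts the walk)
 *	}
 *
 *	struct xfrm_policy_walk walk;
 *
 *	xfrm_policy_walk_init(&walk, XFRM_POLICY_TYPE_MAIN);
 *	xfrm_policy_walk(net, &walk, dump_one_pol, arg);
 *	xfrm_policy_walk_done(&walk, net);
 */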
1846 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
1847 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net,
1848 const struct xfrm_mark *mark,
1849 u32 if_id, u8 type, int dir,
1850 struct xfrm_selector *sel,
1851 struct xfrm_sec_ctx *ctx, int delete,
1852 int *err);
1853 struct xfrm_policy *xfrm_policy_byid(struct net *net,
1854 const struct xfrm_mark *mark, u32 if_id,
1855 u8 type, int dir, u32 id, int delete,
1856 int *err);
1857 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
1858 void xfrm_policy_hash_rebuild(struct net *net);
1859 u32 xfrm_get_acqseq(void);
1860 int verify_spi_info(u8 proto, u32 min, u32 max, struct netlink_ext_ack *extack);
1861 int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi,
1862 struct netlink_ext_ack *extack);
1863 struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
1864 u8 mode, u32 reqid, u32 if_id, u32 pcpu_num, u8 proto,
1865 const xfrm_address_t *daddr,
1866 const xfrm_address_t *saddr, int create,
1867 unsigned short family);
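/* An illustrative SPI-assignment sequence, as driven from a userspace
 * SPI request: validate the requested range, then let the core pick a
 * free SPI for a larval state "x" obtained elsewhere (e.g. from
 * xfrm_find_acq() above).  The range values are examples only.
 *
 *	err = verify_spi_info(IPPROTO_ESP, 0x100, 0xffffffff, extack);
 *	if (!err)
 *		err = xfrm_alloc_spi(x, 0x100, 0xffffffff, extack);
 *	(on success, x->id.spi holds the chosen SPI in network byte order)
 */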
1868 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
1869
1870 #ifdef CONFIG_XFRM_MIGRATE
1871 int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1872 const struct xfrm_migrate *m, int num_bundles,
1873 const struct xfrm_kmaddress *k,
1874 const struct xfrm_encap_tmpl *encap);
1875 struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net,
1876 u32 if_id);
1877 struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
1878 struct xfrm_migrate *m,
1879 struct xfrm_encap_tmpl *encap);
1880 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1881 struct xfrm_migrate *m, int num_bundles,
1882 struct xfrm_kmaddress *k, struct net *net,
1883 struct xfrm_encap_tmpl *encap, u32 if_id,
1884 struct netlink_ext_ack *extack);
1885 #endif
1886
1887 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
1888 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid);
1889 int km_report(struct net *net, u8 proto, struct xfrm_selector *sel,
1890 xfrm_address_t *addr);
1891
1892 void xfrm_input_init(void);
1893 int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
1894
1895 void xfrm_probe_algs(void);
1896 int xfrm_count_pfkey_auth_supported(void);
1897 int xfrm_count_pfkey_enc_supported(void);
1898 struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
1899 struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
1900 struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
1901 struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
1902 struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
1903 struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
1904 struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
1905 struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
1906 struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
1907 int probe);
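/* For example, looking up an authentication algorithm descriptor by
 * name; "hmac(sha256)" is only an illustration, and a non-zero probe
 * argument additionally asks the crypto layer whether the algorithm is
 * really available.
 *
 *	struct xfrm_algo_desc *aalg;
 *
 *	aalg = xfrm_aalg_get_byname("hmac(sha256)", 1);
 *	if (!aalg)
 *		return -ENOSYS;		(example error: not supported)
 */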
1908
1909 static inline bool xfrm6_addr_equal(const xfrm_address_t *a,
1910 const xfrm_address_t *b)
1911 {
1912 return ipv6_addr_equal((const struct in6_addr *)a,
1913 (const struct in6_addr *)b);
1914 }
1915
1916 static inline bool xfrm_addr_equal(const xfrm_address_t *a,
1917 const xfrm_address_t *b,
1918 sa_family_t family)
1919 {
1920 switch (family) {
1921 default:
1922 case AF_INET:
1923 return ((__force u32)a->a4 ^ (__force u32)b->a4) == 0;
1924 case AF_INET6:
1925 return xfrm6_addr_equal(a, b);
1926 }
1927 }
1928
1929 static inline int xfrm_policy_id2dir(u32 index)
1930 {
1931 return index & 7;
1932 }
1933
1934 #ifdef CONFIG_XFRM
1935 void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq);
1936 int xfrm_replay_check(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);
1937 void xfrm_replay_notify(struct xfrm_state *x, int event);
1938 int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb);
1939 int xfrm_replay_recheck(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);
1940
1941 static inline int xfrm_aevent_is_on(struct net *net)
1942 {
1943 struct sock *nlsk;
1944 int ret = 0;
1945
1946 rcu_read_lock();
1947 nlsk = rcu_dereference(net->xfrm.nlsk);
1948 if (nlsk)
1949 ret = netlink_has_listeners(nlsk, XFRMNLGRP_AEVENTS);
1950 rcu_read_unlock();
1951 return ret;
1952 }
1953
1954 static inline int xfrm_acquire_is_on(struct net *net)
1955 {
1956 struct sock *nlsk;
1957 int ret = 0;
1958
1959 rcu_read_lock();
1960 nlsk = rcu_dereference(net->xfrm.nlsk);
1961 if (nlsk)
1962 ret = netlink_has_listeners(nlsk, XFRMNLGRP_ACQUIRE);
1963 rcu_read_unlock();
1964
1965 return ret;
1966 }
1967 #endif
1968
1969 static inline unsigned int aead_len(struct xfrm_algo_aead *alg)
1970 {
1971 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1972 }
1973
1974 static inline unsigned int xfrm_alg_len(const struct xfrm_algo *alg)
1975 {
1976 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1977 }
1978
1979 static inline unsigned int xfrm_alg_auth_len(const struct xfrm_algo_auth *alg)
1980 {
1981 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1982 }
1983
1984 static inline unsigned int xfrm_replay_state_esn_len(struct xfrm_replay_state_esn *replay_esn)
1985 {
1986 return sizeof(*replay_esn) + replay_esn->bmp_len * sizeof(__u32);
1987 }
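/* All of the *_len() helpers above give the byte size of the matching
 * netlink attribute payload.  Key lengths are stored in bits, hence the
 * (bits + 7) / 8 rounding: a 256-bit AEAD key gives
 * aead_len(alg) == sizeof(*alg) + 32.  The ESN replay length scales
 * with the bitmap: a 128-bit anti-replay window means
 * bmp_len == 128 / 32 == 4, so
 *
 *	xfrm_replay_state_esn_len(replay_esn)
 *		== sizeof(struct xfrm_replay_state_esn) + 4 * sizeof(__u32)
 */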
1988
1989 #ifdef CONFIG_XFRM_MIGRATE
1990 static inline int xfrm_replay_clone(struct xfrm_state *x,
1991 struct xfrm_state *orig)
1992 {
1993
1994 x->replay_esn = kmemdup(orig->replay_esn,
1995 xfrm_replay_state_esn_len(orig->replay_esn),
1996 GFP_KERNEL);
1997 if (!x->replay_esn)
1998 return -ENOMEM;
1999 x->preplay_esn = kmemdup(orig->preplay_esn,
2000 xfrm_replay_state_esn_len(orig->preplay_esn),
2001 GFP_KERNEL);
2002 if (!x->preplay_esn)
2003 return -ENOMEM;
2004
2005 return 0;
2006 }
2007
2008 static inline struct xfrm_algo_aead *xfrm_algo_aead_clone(struct xfrm_algo_aead *orig)
2009 {
2010 return kmemdup(orig, aead_len(orig), GFP_KERNEL);
2011 }
2012
2013
2014 static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
2015 {
2016 return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
2017 }
2018
2019 static inline struct xfrm_algo_auth *xfrm_algo_auth_clone(struct xfrm_algo_auth *orig)
2020 {
2021 return kmemdup(orig, xfrm_alg_auth_len(orig), GFP_KERNEL);
2022 }
2023
2024 static inline void xfrm_states_put(struct xfrm_state **states, int n)
2025 {
2026 int i;
2027 for (i = 0; i < n; i++)
2028 xfrm_state_put(*(states + i));
2029 }
2030
2031 static inline void xfrm_states_delete(struct xfrm_state **states, int n)
2032 {
2033 int i;
2034 for (i = 0; i < n; i++)
2035 xfrm_state_delete(*(states + i));
2036 }
2037 #endif
2038
2039 void __init xfrm_dev_init(void);
2040
2041 #ifdef CONFIG_XFRM_OFFLOAD
2042 void xfrm_dev_resume(struct sk_buff *skb);
2043 void xfrm_dev_backlog(struct softnet_data *sd);
2044 struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
2045 int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
2046 struct xfrm_user_offload *xuo,
2047 struct netlink_ext_ack *extack);
2048 int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
2049 struct xfrm_user_offload *xuo, u8 dir,
2050 struct netlink_ext_ack *extack);
2051 bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
2052 void xfrm_dev_state_delete(struct xfrm_state *x);
2053 void xfrm_dev_state_free(struct xfrm_state *x);
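/* A minimal sketch of binding an SA to a device for offload; the
 * struct xfrm_user_offload normally arrives as the XFRMA_OFFLOAD_DEV
 * netlink attribute.  The flag choice below (an inbound crypto-offload
 * SA) is only an example.
 *
 *	struct xfrm_user_offload xuo = {
 *		.ifindex = dev->ifindex,
 *		.flags   = XFRM_OFFLOAD_INBOUND,
 *	};
 *
 *	err = xfrm_dev_state_add(net, x, &xuo, extack);
 */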
2054
2055 static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
2056 {
2057 struct xfrm_dev_offload *xso = &x->xso;
2058 struct net_device *dev = READ_ONCE(xso->dev);
2059
2060 if (dev && dev->xfrmdev_ops->xdo_dev_state_advance_esn)
2061 dev->xfrmdev_ops->xdo_dev_state_advance_esn(x);
2062 }
2063
2064 static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
2065 {
2066 struct xfrm_state *x = dst->xfrm;
2067 struct xfrm_dst *xdst;
2068
2069 if (!x || !x->type_offload)
2070 return false;
2071
2072 xdst = (struct xfrm_dst *) dst;
2073 if (!x->xso.offload_handle && !xdst->child->xfrm)
2074 return true;
2075 if (x->xso.offload_handle && (x->xso.dev == xfrm_dst_path(dst)->dev) &&
2076 !xdst->child->xfrm)
2077 return true;
2078
2079 return false;
2080 }
2081
2082 static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
2083 {
2084 struct xfrm_dev_offload *xdo = &x->xdo;
2085 struct net_device *dev = xdo->dev;
2086
2087 if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_policy_delete)
2088 dev->xfrmdev_ops->xdo_dev_policy_delete(x);
2089 }
2090
2091 static inline void xfrm_dev_policy_free(struct xfrm_policy *x)
2092 {
2093 struct xfrm_dev_offload *xdo = &x->xdo;
2094 struct net_device *dev = xdo->dev;
2095
2096 if (dev && dev->xfrmdev_ops) {
2097 if (dev->xfrmdev_ops->xdo_dev_policy_free)
2098 dev->xfrmdev_ops->xdo_dev_policy_free(x);
2099 xdo->dev = NULL;
2100 netdev_put(dev, &xdo->dev_tracker);
2101 }
2102 }
2103 #else
2104 static inline void xfrm_dev_resume(struct sk_buff *skb)
2105 {
2106 }
2107
2108 static inline void xfrm_dev_backlog(struct softnet_data *sd)
2109 {
2110 }
2111
2112 static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
2113 {
2114 return skb;
2115 }
2116
2117 static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo, struct netlink_ext_ack *extack)
2118 {
2119 return 0;
2120 }
2121
2122 static inline void xfrm_dev_state_delete(struct xfrm_state *x)
2123 {
2124 }
2125
2126 static inline void xfrm_dev_state_free(struct xfrm_state *x)
2127 {
2128 }
2129
2130 static inline int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
2131 struct xfrm_user_offload *xuo, u8 dir,
2132 struct netlink_ext_ack *extack)
2133 {
2134 return 0;
2135 }
2136
2137 static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
2138 {
2139 }
2140
2141 static inline void xfrm_dev_policy_free(struct xfrm_policy *x)
2142 {
2143 }
2144
2145 static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
2146 {
2147 return false;
2148 }
2149
2150 static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
2151 {
2152 }
2153
2154 static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
2155 {
2156 return false;
2157 }
2158 #endif
2159
2160 static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
2161 {
2162 if (attrs[XFRMA_MARK])
2163 memcpy(m, nla_data(attrs[XFRMA_MARK]), sizeof(struct xfrm_mark));
2164 else
2165 m->v = m->m = 0;
2166
2167 return m->v & m->m;
2168 }
2169
2170 static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
2171 {
2172 int ret = 0;
2173
2174 if (m->m | m->v)
2175 ret = nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
2176 return ret;
2177 }
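/* Typical use of the two mark helpers in a netlink handler:
 * xfrm_mark_get() extracts XFRMA_MARK (or zeroes the mark) and returns
 * the masked value used for lookups, while xfrm_mark_put() re-emits the
 * attribute when dumping, but only for a non-empty mark.  A sketch:
 *
 *	struct xfrm_mark m;
 *	u32 mark = xfrm_mark_get(attrs, &m);
 *
 *	x = xfrm_state_lookup(net, mark, &daddr, spi, proto, family);
 *	...
 *	if (xfrm_mark_put(skb, &m))
 *		goto nla_put_failure;
 */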
2178
2179 static inline __u32 xfrm_smark_get(__u32 mark, struct xfrm_state *x)
2180 {
2181 struct xfrm_mark *m = &x->props.smark;
2182
2183 return (m->v & m->m) | (mark & ~m->m);
2184 }
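/* For example, with x->props.smark = { .v = 0x0a, .m = 0xff }, an skb
 * mark of 0x12345678 yields (0x0a & 0xff) | (0x12345678 & ~0xff) ==
 * 0x1234560a: the masked bits are forced to the state's value and the
 * remaining bits pass through unchanged.
 */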
2185
2186 static inline int xfrm_if_id_put(struct sk_buff *skb, __u32 if_id)
2187 {
2188 int ret = 0;
2189
2190 if (if_id)
2191 ret = nla_put_u32(skb, XFRMA_IF_ID, if_id);
2192 return ret;
2193 }
2194
2195 static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
2196 unsigned int family)
2197 {
2198 bool tunnel = false;
2199
2200 switch(family) {
2201 case AF_INET:
2202 if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
2203 tunnel = true;
2204 break;
2205 case AF_INET6:
2206 if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
2207 tunnel = true;
2208 break;
2209 }
2210 if (tunnel && !(x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL))
2211 return -EINVAL;
2212
2213 return 0;
2214 }
2215
2216 extern const int xfrm_msg_min[XFRM_NR_MSGTYPES];
2217 extern const struct nla_policy xfrma_policy[XFRMA_MAX+1];
2218
2219 struct xfrm_translator {
2220 /* Allocate frag_list and put compat translation there */
2221 int (*alloc_compat)(struct sk_buff *skb, const struct nlmsghdr *src);
2222
2223 /* Allocate nlmsg with 64-bit translation of received 32-bit message */
2224 struct nlmsghdr *(*rcv_msg_compat)(const struct nlmsghdr *nlh,
2225 int maxtype, const struct nla_policy *policy,
2226 struct netlink_ext_ack *extack);
2227
2228 /* Translate 32-bit user_policy from sockptr */
2229 int (*xlate_user_policy_sockptr)(u8 **pdata32, int optlen);
2230
2231 struct module *owner;
2232 };
2233
2234 #if IS_ENABLED(CONFIG_XFRM_USER_COMPAT)
2235 extern int xfrm_register_translator(struct xfrm_translator *xtr);
2236 extern int xfrm_unregister_translator(struct xfrm_translator *xtr);
2237 extern struct xfrm_translator *xfrm_get_translator(void);
2238 extern void xfrm_put_translator(struct xfrm_translator *xtr);
2239 #else
2240 static inline struct xfrm_translator *xfrm_get_translator(void)
2241 {
2242 return NULL;
2243 }
2244 static inline void xfrm_put_translator(struct xfrm_translator *xtr)
2245 {
2246 }
2247 #endif
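/* When CONFIG_XFRM_USER_COMPAT is enabled, a compat translator module
 * fills in the ops above and registers itself, pairing
 * xfrm_register_translator() on init with xfrm_unregister_translator()
 * on exit.  The my_*() callbacks below are hypothetical names:
 *
 *	static struct xfrm_translator xtr = {
 *		.alloc_compat              = my_alloc_compat,
 *		.rcv_msg_compat            = my_rcv_msg_compat,
 *		.xlate_user_policy_sockptr = my_xlate_user_policy,
 *		.owner                     = THIS_MODULE,
 *	};
 *
 *	err = xfrm_register_translator(&xtr);
 *	...
 *	xfrm_unregister_translator(&xtr);
 */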
2248
2249 #if IS_ENABLED(CONFIG_IPV6)
2250 static inline bool xfrm6_local_dontfrag(const struct sock *sk)
2251 {
2252 int proto;
2253
2254 if (!sk || sk->sk_family != AF_INET6)
2255 return false;
2256
2257 proto = sk->sk_protocol;
2258 if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
2259 return inet6_test_bit(DONTFRAG, sk);
2260
2261 return false;
2262 }
2263 #endif
2264
2265 #if (IS_BUILTIN(CONFIG_XFRM_INTERFACE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \
2266 (IS_MODULE(CONFIG_XFRM_INTERFACE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
2267
2268 extern struct metadata_dst __percpu *xfrm_bpf_md_dst;
2269
2270 int register_xfrm_interface_bpf(void);
2271
2272 #else
2273
2274 static inline int register_xfrm_interface_bpf(void)
2275 {
2276 return 0;
2277 }
2278
2279 #endif
2280
2281 #if IS_ENABLED(CONFIG_DEBUG_INFO_BTF)
2282 int register_xfrm_state_bpf(void);
2283 #else
2284 static inline int register_xfrm_state_bpf(void)
2285 {
2286 return 0;
2287 }
2288 #endif
2289
2290 int xfrm_nat_keepalive_init(unsigned short family);
2291 void xfrm_nat_keepalive_fini(unsigned short family);
2292 int xfrm_nat_keepalive_net_init(struct net *net);
2293 int xfrm_nat_keepalive_net_fini(struct net *net);
2294 void xfrm_nat_keepalive_state_updated(struct xfrm_state *x);
2295
2296 #endif /* _NET_XFRM_H */
2297