1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _NET_XFRM_H
3 #define _NET_XFRM_H
4
5 #include <linux/compiler.h>
6 #include <linux/xfrm.h>
7 #include <linux/spinlock.h>
8 #include <linux/list.h>
9 #include <linux/skbuff.h>
10 #include <linux/socket.h>
11 #include <linux/pfkeyv2.h>
12 #include <linux/ipsec.h>
13 #include <linux/in6.h>
14 #include <linux/mutex.h>
15 #include <linux/audit.h>
16 #include <linux/slab.h>
17 #include <linux/refcount.h>
18 #include <linux/sockptr.h>
19
20 #include <net/sock.h>
21 #include <net/dst.h>
22 #include <net/inet_dscp.h>
23 #include <net/ip.h>
24 #include <net/route.h>
25 #include <net/ipv6.h>
26 #include <net/ip6_fib.h>
27 #include <net/flow.h>
28 #include <net/gro_cells.h>
29
30 #include <linux/interrupt.h>
31
32 #ifdef CONFIG_XFRM_STATISTICS
33 #include <net/snmp.h>
34 #endif
35
/* IP protocol numbers handled by the transform framework. */
#define XFRM_PROTO_ESP		50
#define XFRM_PROTO_AH		51
#define XFRM_PROTO_COMP		108
#define XFRM_PROTO_IPIP		4
#define XFRM_PROTO_IPV6		41
#define XFRM_PROTO_IPTFS	IPPROTO_AGGFRAG
#define XFRM_PROTO_ROUTING	IPPROTO_ROUTING
#define XFRM_PROTO_DSTOPTS	IPPROTO_DSTOPTS

/* Round @len up to a multiple of 4 / 8 bytes. */
#define XFRM_ALIGN4(len)	(((len) + 3) & ~3)
#define XFRM_ALIGN8(len)	(((len) + 7) & ~7)
/* Module aliases so the right module can be autoloaded by mode/type. */
#define MODULE_ALIAS_XFRM_MODE(family, encap) \
	MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
#define MODULE_ALIAS_XFRM_TYPE(family, proto) \
	MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto))
#define MODULE_ALIAS_XFRM_OFFLOAD_TYPE(family, proto) \
	MODULE_ALIAS("xfrm-offload-" __stringify(family) "-" __stringify(proto))

#ifdef CONFIG_XFRM_STATISTICS
#define XFRM_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
#define XFRM_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.xfrm_statistics, field, val)
#else
/* Statistics compiled out: evaluate @net once, discard the rest. */
#define XFRM_INC_STATS(net, field)	((void)(net))
#define XFRM_ADD_STATS(net, field, val) ((void)(net))
#endif
61
62
63 /* Organization of SPD aka "XFRM rules"
64 ------------------------------------
65
66 Basic objects:
67 - policy rule, struct xfrm_policy (=SPD entry)
68 - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
69 - instance of a transformer, struct xfrm_state (=SA)
70 - template to clone xfrm_state, struct xfrm_tmpl
71
72 SPD is organized as hash table (for policies that meet minimum address prefix
73 length setting, net->xfrm.policy_hthresh). Other policies are stored in
74 lists, sorted into rbtree ordered by destination and source address networks.
75 See net/xfrm/xfrm_policy.c for details.
76
77 (To be compatible with existing pfkeyv2 implementations,
78 many rules with priority of 0x7fffffff are allowed to exist and
79 such rules are ordered in an unpredictable way, thanks to bsd folks.)
80
81 If "action" is "block", then we prohibit the flow, otherwise:
   if "xfrm_nr" is zero, the flow passes untransformed. Otherwise,
83 policy entry has list of up to XFRM_MAX_DEPTH transformations,
84 described by templates xfrm_tmpl. Each template is resolved
85 to a complete xfrm_state (see below) and we pack bundle of transformations
86 to a dst_entry returned to requester.
87
88 dst -. xfrm .-> xfrm_state #1
89 |---. child .-> dst -. xfrm .-> xfrm_state #2
90 |---. child .-> dst -. xfrm .-> xfrm_state #3
91 |---. child .-> NULL
92
93
   Resolution of xfrm_tmpl
95 -----------------------
96 Template contains:
97 1. ->mode Mode: transport or tunnel
98 2. ->id.proto Protocol: AH/ESP/IPCOMP
99 3. ->id.daddr Remote tunnel endpoint, ignored for transport mode.
100 Q: allow to resolve security gateway?
101 4. ->id.spi If not zero, static SPI.
102 5. ->saddr Local tunnel endpoint, ignored for transport mode.
103 6. ->algos List of allowed algos. Plain bitmask now.
104 Q: ealgos, aalgos, calgos. What a mess...
105 7. ->share Sharing mode.
106 Q: how to implement private sharing mode? To add struct sock* to
107 flow id?
108
109 Having this template we search through SAD searching for entries
110 with appropriate mode/proto/algo, permitted by selector.
111 If no appropriate entry found, it is requested from key manager.
112
113 PROBLEMS:
114 Q: How to find all the bundles referring to a physical path for
115 PMTU discovery? Seems, dst should contain list of all parents...
116 and enter to infinite locking hierarchy disaster.
117 No! It is easier, we will not search for them, let them find us.
118 We add genid to each dst plus pointer to genid of raw IP route,
119 pmtu disc will update pmtu on raw IP route and increase its genid.
120 dst_check() will see this for top level and trigger resyncing
121 metrics. Plus, it will be made via sk->sk_dst_cache. Solved.
122 */
123
/* Iteration cursor for walking the set of xfrm_state objects during a
 * dump; embedded in each state as xfrm_state.km so walks can resume. */
struct xfrm_state_walk {
	struct list_head all;
	u8 state;
	u8 dying;
	u8 proto;	/* protocol filter; 0 matches all */
	u32 seq;
	struct xfrm_address_filter *filter;	/* optional address filter */
};
132
/* Direction of a hardware-offloaded state (xfrm_dev_offload.dir). */
enum {
	XFRM_DEV_OFFLOAD_IN = 1,
	XFRM_DEV_OFFLOAD_OUT,
	XFRM_DEV_OFFLOAD_FWD,
};

/* Kind of hardware offload (xfrm_dev_offload.type). */
enum {
	XFRM_DEV_OFFLOAD_UNSPECIFIED,
	XFRM_DEV_OFFLOAD_CRYPTO,
	XFRM_DEV_OFFLOAD_PACKET,
};

/* Flag bits for xfrm_dev_offload.flags. */
enum {
	XFRM_DEV_OFFLOAD_FLAG_ACQ = 1,
};
148
/* Hardware-offload bookkeeping attached to a state or policy. */
struct xfrm_dev_offload {
	struct net_device *dev;		/* device the object is offloaded to */
	netdevice_tracker dev_tracker;
	struct net_device *real_dev;
	unsigned long offload_handle;
	u8 dir : 2;			/* XFRM_DEV_OFFLOAD_{IN,OUT,FWD} */
	u8 type : 2;			/* XFRM_DEV_OFFLOAD_{CRYPTO,PACKET} */
	u8 flags : 2;			/* XFRM_DEV_OFFLOAD_FLAG_* */
};
158
/* Encapsulation mode descriptor: mode id, address family and flags. */
struct xfrm_mode {
	u8 encap;	/* XFRM_MODE_* encapsulation id */
	u8 family;	/* address family this mode instance is for */
	u8 flags;	/* XFRM_MODE_FLAG_* */
};

/* Flags for xfrm_mode. */
enum {
	XFRM_MODE_FLAG_TUNNEL = 1,
};

/* Which replay-detection implementation a state uses. */
enum xfrm_replay_mode {
	XFRM_REPLAY_MODE_LEGACY,
	XFRM_REPLAY_MODE_BMP,	/* bitmap-based window */
	XFRM_REPLAY_MODE_ESN,	/* extended sequence numbers */
};
175
/* Full description of state of transformer (an SA, Security
 * Association): hash-table linkage, negotiated parameters, algorithm
 * data, lifetime accounting and replay-protection state.
 */
struct xfrm_state {
	possible_net_t xs_net;	/* owning network namespace */
	union {
		struct hlist_node gclist;	/* while queued for garbage collection */
		struct hlist_node bydst;	/* hash chain by destination */
	};
	union {
		struct hlist_node dev_gclist;
		struct hlist_node bysrc;	/* hash chain by source */
	};
	struct hlist_node byspi;	/* hash chain by SPI */
	struct hlist_node byseq;
	struct hlist_node state_cache;
	struct hlist_node state_cache_input;

	refcount_t refcnt;
	spinlock_t lock;

	u32 pcpu_num;
	struct xfrm_id id;	/* daddr/SPI/proto identity of the SA */
	struct xfrm_selector sel;
	struct xfrm_mark mark;
	u32 if_id;	/* virtual xfrm interface id */
	u32 tfcpad;

	u32 genid;

	/* Key manager bits */
	struct xfrm_state_walk km;

	/* Parameters of this state. */
	struct {
		u32 reqid;
		u8 mode;
		u8 replay_window;
		/* authentication / encryption / compression algorithm ids */
		u8 aalgo, ealgo, calgo;
		u8 flags;
		u16 family;
		xfrm_address_t saddr;
		int header_len;
		int enc_hdr_len;
		int trailer_len;
		u32 extra_flags;
		struct xfrm_mark smark;
	} props;

	struct xfrm_lifetime_cfg lft;	/* configured soft/hard limits */

	/* Data for transformer */
	struct xfrm_algo_auth *aalg;
	struct xfrm_algo *ealg;
	struct xfrm_algo *calg;
	struct xfrm_algo_aead *aead;
	const char *geniv;

	/* mapping change rate limiting */
	__be16 new_mapping_sport;
	u32 new_mapping;	/* seconds */
	u32 mapping_maxage;	/* seconds for input SA */

	/* Data for encapsulator */
	struct xfrm_encap_tmpl *encap;
	struct sock __rcu *encap_sk;

	/* NAT keepalive */
	u32 nat_keepalive_interval;	/* seconds */
	time64_t nat_keepalive_expiration;

	/* Data for care-of address */
	xfrm_address_t *coaddr;

	/* IPComp needs an IPIP tunnel for handling uncompressed packets */
	struct xfrm_state *tunnel;

	/* If a tunnel, number of users + 1 */
	atomic_t tunnel_users;

	/* State for replay detection */
	struct xfrm_replay_state replay;
	struct xfrm_replay_state_esn *replay_esn;

	/* Replay detection state at the time we sent the last notification */
	struct xfrm_replay_state preplay;
	struct xfrm_replay_state_esn *preplay_esn;

	/* replay detection mode */
	enum xfrm_replay_mode repl_mode;
	/* internal flag that only holds state for delayed aevent at the
	 * moment
	 */
	u32 xflags;

	/* Replay detection notification settings */
	u32 replay_maxage;
	u32 replay_maxdiff;

	/* Replay detection notification timer */
	struct timer_list rtimer;

	/* Statistics */
	struct xfrm_stats stats;

	struct xfrm_lifetime_cur curlft;	/* current usage counters */
	struct hrtimer mtimer;

	struct xfrm_dev_offload xso;	/* hardware offload state */

	/* used to fix curlft->add_time when changing date */
	long saved_tmo;

	/* Last used time */
	time64_t lastused;

	struct page_frag xfrag;

	/* Reference to data common to all the instances of this
	 * transformer. */
	const struct xfrm_type *type;
	struct xfrm_mode inner_mode;
	struct xfrm_mode inner_mode_iaf;
	struct xfrm_mode outer_mode;

	const struct xfrm_type_offload *type_offload;

	/* Security context */
	struct xfrm_sec_ctx *security;

	/* Private data of this transformer, format is opaque,
	 * interpreted by xfrm_type methods. */
	void *data;
	u8 dir;

	/* optional per-mode callbacks, NULL for modes that need none */
	const struct xfrm_mode_cbs *mode_cbs;
	void *mode_data;
};
312
xs_net(struct xfrm_state * x)313 static inline struct net *xs_net(struct xfrm_state *x)
314 {
315 return read_pnet(&x->xs_net);
316 }
317
/* xflags - make enum if more show up */
#define XFRM_TIME_DEFER	1
#define XFRM_SOFT_EXPIRE	2

/* Lifecycle of an SA (stored in xfrm_state.km.state). */
enum {
	XFRM_STATE_VOID,
	XFRM_STATE_ACQ,		/* larval: waiting for the key manager */
	XFRM_STATE_VALID,
	XFRM_STATE_ERROR,
	XFRM_STATE_EXPIRED,
	XFRM_STATE_DEAD
};
330
/* callback structure passed from either netlink or pfkey */
struct km_event {
	/* event-specific payload; which member is valid depends on @event */
	union {
		u32 hard;	/* hard (vs soft) expiry */
		u32 proto;
		u32 byid;
		u32 aevent;
		u32 type;
	} data;

	u32 seq;
	u32 portid;	/* netlink portid of the requester, if any */
	u32 event;
	struct net *net;
};
346
/* Result of decoding a session on an xfrm interface. */
struct xfrm_if_decode_session_result {
	struct net *net;
	u32 if_id;	/* virtual xfrm interface id */
};

/* Hook set by the xfrm interface driver to map an skb to its
 * namespace/if_id; returns true when the skb belongs to an xfrmi. */
struct xfrm_if_cb {
	bool (*decode_session)(struct sk_buff *skb,
			       unsigned short family,
			       struct xfrm_if_decode_session_result *res);
};

void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
void xfrm_if_unregister_cb(void);
360
/* Arguments for the per-family dst_lookup()/get_saddr() hooks. */
struct xfrm_dst_lookup_params {
	struct net *net;
	dscp_t dscp;
	int oif;		/* output interface, 0 == unspecified */
	xfrm_address_t *saddr;
	xfrm_address_t *daddr;
	u32 mark;
	__u8 ipproto;
	union flowi_uli uli;	/* ports / icmp type+code / gre key */
};
371
struct net_device;
struct xfrm_type;
struct xfrm_dst;
/* Per-family hooks used by the policy layer to build routes/bundles. */
struct xfrm_policy_afinfo {
	struct dst_ops *dst_ops;
	struct dst_entry *(*dst_lookup)(const struct xfrm_dst_lookup_params *params);
	int (*get_saddr)(xfrm_address_t *saddr,
			 const struct xfrm_dst_lookup_params *params);
	int (*fill_dst)(struct xfrm_dst *xdst,
			struct net_device *dev,
			const struct flowi *fl);
	struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
};

int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family);
void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo);
/* Broadcast policy/state events to registered key managers. */
void km_policy_notify(struct xfrm_policy *xp, int dir,
		      const struct km_event *c);
void km_state_notify(struct xfrm_state *x, const struct km_event *c);

struct xfrm_tmpl;
/* Ask the key managers to resolve template @t into a full state. */
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t,
	     struct xfrm_policy *pol);
void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
int __xfrm_state_delete(struct xfrm_state *x);
397
/* Per-family state-layer hooks plus the xfrm_type table for the family. */
struct xfrm_state_afinfo {
	u8 family;
	u8 proto;

	const struct xfrm_type_offload *type_offload_esp;

	const struct xfrm_type *type_esp;
	const struct xfrm_type *type_ipip;
	const struct xfrm_type *type_ipip6;
	const struct xfrm_type *type_comp;
	const struct xfrm_type *type_ah;
	const struct xfrm_type *type_routing;
	const struct xfrm_type *type_dstopts;

	int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
	int (*transport_finish)(struct sk_buff *skb,
				int async);
	void (*local_error)(struct sk_buff *skb, u32 mtu);
};

int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family);
422
/* Per-family input callback, dispatched on receive errors/completion. */
struct xfrm_input_afinfo {
	u8 family;
	bool is_ipip;
	int (*callback)(struct sk_buff *skb, u8 protocol,
			int err);
};

int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo);
int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo);

void xfrm_flush_gc(void);
void xfrm_state_delete_tunnel(struct xfrm_state *x);
435
/* A transform protocol implementation (AH/ESP/IPComp/...), keyed by
 * IP protocol number; provides per-packet input/output handlers. */
struct xfrm_type {
	struct module *owner;
	u8 proto;	/* IP protocol number this type implements */
	u8 flags;
#define XFRM_TYPE_NON_FRAGMENT	1
#define XFRM_TYPE_REPLAY_PROT	2
#define XFRM_TYPE_LOCAL_COADDR	4
#define XFRM_TYPE_REMOTE_COADDR	8

	int (*init_state)(struct xfrm_state *x,
			  struct netlink_ext_ack *extack);
	void (*destructor)(struct xfrm_state *);
	int (*input)(struct xfrm_state *, struct sk_buff *skb);
	int (*output)(struct xfrm_state *, struct sk_buff *pskb);
	int (*reject)(struct xfrm_state *, struct sk_buff *,
		      const struct flowi *);
};

int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
456
/* Offload companion of struct xfrm_type: hooks used on the
 * hardware-offload datapath. */
struct xfrm_type_offload {
	struct module *owner;
	u8 proto;
	void (*encap)(struct xfrm_state *, struct sk_buff *pskb);
	int (*input_tail)(struct xfrm_state *x, struct sk_buff *skb);
	int (*xmit)(struct xfrm_state *, struct sk_buff *pskb, netdev_features_t features);
};

int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family);
void xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family);
void xfrm_set_type_offload(struct xfrm_state *x);
xfrm_unset_type_offload(struct xfrm_state * x)468 static inline void xfrm_unset_type_offload(struct xfrm_state *x)
469 {
470 if (!x->type_offload)
471 return;
472
473 module_put(x->type_offload->owner);
474 x->type_offload = NULL;
475 }
476
/**
 * struct xfrm_mode_cbs - XFRM mode callbacks
 * @owner: module owner or NULL
 * @init_state: Add/init mode specific state in `xfrm_state *x`
 * @clone_state: Copy mode specific values from `orig` to new state `x`
 * @destroy_state: Cleanup mode specific state from `xfrm_state *x`
 * @user_init: Process mode specific netlink attributes from user
 * @copy_to_user: Add netlink attributes to `attrs` based on state in `x`
 * @sa_len: Return space required to store mode specific netlink attributes
 * @get_inner_mtu: Return avail payload space after removing encap overhead
 * @input: Process received packet from SA using mode
 * @output: Output given packet using mode
 * @prepare_output: Add mode specific encapsulation to packet in skb. On return
 *	`transport_header` should point at ESP header, `network_header` should
 *	point at outer IP header and `mac_header` should point at the
 *	protocol/nexthdr field of the outer IP.
 *
 * One should examine and understand the specific uses of these callbacks in
 * xfrm for further detail on how and when these functions are called. RTSL.
 */
struct xfrm_mode_cbs {
	struct module *owner;
	int (*init_state)(struct xfrm_state *x);
	int (*clone_state)(struct xfrm_state *x, struct xfrm_state *orig);
	void (*destroy_state)(struct xfrm_state *x);
	int (*user_init)(struct net *net, struct xfrm_state *x,
			 struct nlattr **attrs,
			 struct netlink_ext_ack *extack);
	int (*copy_to_user)(struct xfrm_state *x, struct sk_buff *skb);
	unsigned int (*sa_len)(const struct xfrm_state *x);
	u32 (*get_inner_mtu)(struct xfrm_state *x, int outer_mtu);
	int (*input)(struct xfrm_state *x, struct sk_buff *skb);
	int (*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
	int (*prepare_output)(struct xfrm_state *x, struct sk_buff *skb);
};

int xfrm_register_mode_cbs(u8 mode, const struct xfrm_mode_cbs *mode_cbs);
void xfrm_unregister_mode_cbs(u8 mode);
515
/* Map an address family to the IP protocol number of its IP-in-IP
 * encapsulation; 0 for families without one. */
static inline int xfrm_af2proto(unsigned int family)
{
	if (family == AF_INET)
		return IPPROTO_IPIP;
	if (family == AF_INET6)
		return IPPROTO_IPV6;
	return 0;
}
527
xfrm_ip2inner_mode(struct xfrm_state * x,int ipproto)528 static inline const struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto)
529 {
530 if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
531 (ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6))
532 return &x->inner_mode;
533 else
534 return &x->inner_mode_iaf;
535 }
536
/* Template from which an xfrm_state is resolved (see the SPD comment
 * near the top of this file). */
struct xfrm_tmpl {
	/* id in template is interpreted as:
	 * daddr - destination of tunnel, may be zero for transport mode.
	 * spi   - zero to acquire spi. Not zero if spi is static, then
	 *	   daddr must be fixed too.
	 * proto - AH/ESP/IPCOMP
	 */
	struct xfrm_id id;

	/* Source address of tunnel. Ignored, if it is not a tunnel. */
	xfrm_address_t saddr;

	unsigned short encap_family;

	u32 reqid;

	/* Mode: transport, tunnel etc. */
	u8 mode;

	/* Sharing mode: unique, this session only, this user only etc. */
	u8 share;

	/* May skip this transformation if no SA is found */
	u8 optional;

	/* Skip aalgos/ealgos/calgos checks. */
	u8 allalgs;

	/* Bit mask of algos allowed for acquisition */
	u32 aalgos;
	u32 ealgos;
	u32 calgos;
};
570
/* Maximum number of templates (chained transforms) per policy. */
#define XFRM_MAX_DEPTH		6
#define XFRM_MAX_OFFLOAD_DEPTH	1

/* Per-policy linkage for resumable policy walks. */
struct xfrm_policy_walk_entry {
	struct list_head all;
	u8 dead;
};

/* Cursor for iterating all policies of a given type. */
struct xfrm_policy_walk {
	struct xfrm_policy_walk_entry walk;
	u8 type;
	u32 seq;
};

/* Packets held while an acquire is outstanding for a policy. */
struct xfrm_policy_queue {
	struct sk_buff_head hold_queue;
	struct timer_list hold_timer;
	unsigned long timeout;
};
590
/**
 * struct xfrm_policy - xfrm policy
 * @xp_net: network namespace the policy lives in
 * @bydst: hlist node for SPD hash table or rbtree list
 * @byidx: hlist node for index hash table
 * @state_cache_list: hlist head for policy cached xfrm states
 * @lock: serialize changes to policy structure members
 * @refcnt: reference count, freed once it reaches 0
 * @pos: kernel internal tie-breaker to determine age of policy
 * @timer: timer
 * @genid: generation, used to invalidate old policies
 * @priority: priority, set by userspace
 * @index: policy index (autogenerated)
 * @if_id: virtual xfrm interface id
 * @mark: packet mark
 * @selector: selector
 * @lft: lifetime configuration data
 * @curlft: lifetime state
 * @walk: list head on pernet policy list
 * @polq: queue to hold packets while acquire operation in progress
 * @bydst_reinsert: policy tree node needs to be merged
 * @type: XFRM_POLICY_TYPE_MAIN or _SUB
 * @action: XFRM_POLICY_ALLOW or _BLOCK
 * @flags: XFRM_POLICY_LOCALOK, XFRM_POLICY_ICMP
 * @xfrm_nr: number of used templates in @xfrm_vec
 * @family: protocol family
 * @security: SELinux security label
 * @xfrm_vec: array of templates to resolve state
 * @rcu: rcu head, used to defer memory release
 * @xdo: hardware offload state
 */
struct xfrm_policy {
	possible_net_t xp_net;
	struct hlist_node bydst;
	struct hlist_node byidx;

	struct hlist_head state_cache_list;

	/* This lock only affects elements except for entry. */
	rwlock_t lock;
	refcount_t refcnt;
	u32 pos;
	struct timer_list timer;

	atomic_t genid;
	u32 priority;
	u32 index;
	u32 if_id;
	struct xfrm_mark mark;
	struct xfrm_selector selector;
	struct xfrm_lifetime_cfg lft;
	struct xfrm_lifetime_cur curlft;
	struct xfrm_policy_walk_entry walk;
	struct xfrm_policy_queue polq;
	bool bydst_reinsert;
	u8 type;
	u8 action;
	u8 flags;
	u8 xfrm_nr;
	u16 family;
	struct xfrm_sec_ctx *security;
	struct xfrm_tmpl xfrm_vec[XFRM_MAX_DEPTH];
	struct rcu_head rcu;

	struct xfrm_dev_offload xdo;
};
657
xp_net(const struct xfrm_policy * xp)658 static inline struct net *xp_net(const struct xfrm_policy *xp)
659 {
660 return read_pnet(&xp->xp_net);
661 }
662
/* Key-manager address pair carried with a MIGRATE request. */
struct xfrm_kmaddress {
	xfrm_address_t local;
	xfrm_address_t remote;
	u32 reserved;
	u16 family;
};

/* One endpoint rewrite of an SA/policy migration (old -> new). */
struct xfrm_migrate {
	xfrm_address_t old_daddr;
	xfrm_address_t old_saddr;
	xfrm_address_t new_daddr;
	xfrm_address_t new_saddr;
	u8 proto;
	u8 mode;
	u16 reserved;
	u32 reqid;
	u16 old_family;
	u16 new_family;
};
682
/* Key-manager timeout — presumably seconds; confirm against users. */
#define XFRM_KM_TIMEOUT		30
/* what happened */
#define XFRM_REPLAY_UPDATE	XFRM_AE_CR
#define XFRM_REPLAY_TIMEOUT	XFRM_AE_CE

/* default aevent timeout in units of 100ms */
#define XFRM_AE_ETIME		10
/* Async Event timer multiplier */
#define XFRM_AE_ETH_M		10
/* default seq threshold size */
#define XFRM_AE_SEQT_SIZE	2
694
/* A registered key manager (netlink xfrm_user or pfkeyv2); the core
 * invokes these hooks to resolve states and broadcast events. */
struct xfrm_mgr {
	struct list_head list;
	int (*notify)(struct xfrm_state *x, const struct km_event *c);
	int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp);
	struct xfrm_policy *(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
	int (*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
	int (*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c);
	int (*report)(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
	int (*migrate)(const struct xfrm_selector *sel,
		       u8 dir, u8 type,
		       const struct xfrm_migrate *m,
		       int num_bundles,
		       const struct xfrm_kmaddress *k,
		       const struct xfrm_encap_tmpl *encap);
	bool (*is_alive)(const struct km_event *c);
};

void xfrm_register_km(struct xfrm_mgr *km);
void xfrm_unregister_km(struct xfrm_mgr *km);
714
/* skb control-block layout shared by the tunnel handlers; the IP
 * family's parm block is preserved in @header. */
struct xfrm_tunnel_skb_cb {
	union {
		struct inet_skb_parm h4;
		struct inet6_skb_parm h6;
	} header;

	union {
		struct ip_tunnel *ip4;
		struct ip6_tnl *ip6;
	} tunnel;
};

#define XFRM_TUNNEL_SKB_CB(__skb) ((struct xfrm_tunnel_skb_cb *)&((__skb)->cb[0]))
728
/*
 * This structure is used for the duration where packets are being
 * transformed by IPsec. As soon as the packet leaves IPsec the
 * area beyond the generic IP part may be overwritten.
 */
struct xfrm_skb_cb {
	struct xfrm_tunnel_skb_cb header;

	/* Sequence number for replay protection. */
	union {
		/* host byte order on output, wire order on input */
		struct {
			__u32 low;
			__u32 hi;
		} output;
		struct {
			__be32 low;
			__be32 hi;
		} input;
	} seq;
};

#define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0]))
751
/*
 * This structure is used by the afinfo prepare_input/prepare_output functions
 * to transmit header information to the mode input/output functions.
 */
struct xfrm_mode_skb_cb {
	struct xfrm_tunnel_skb_cb header;

	/* Copied from header for IPv4, always set to zero and DF for IPv6. */
	__be16 id;
	__be16 frag_off;

	/* IP header length (excluding options or extension headers). */
	u8 ihl;

	/* TOS for IPv4, class for IPv6. */
	u8 tos;

	/* TTL for IPv4, hop limit for IPv6. */
	u8 ttl;

	/* Protocol for IPv4, NH for IPv6. */
	u8 protocol;

	/* Option length for IPv4, zero for IPv6. */
	u8 optlen;

	/* Used by IPv6 only, zero for IPv4. */
	u8 flow_lbl[3];
};

#define XFRM_MODE_SKB_CB(__skb) ((struct xfrm_mode_skb_cb *)&((__skb)->cb[0]))
783
/*
 * This structure is used by the input processing to locate the SPI and
 * related information.
 */
struct xfrm_spi_skb_cb {
	struct xfrm_tunnel_skb_cb header;

	unsigned int daddroff;	/* offset of the destination address */
	unsigned int family;
	__be32 seq;
};

#define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
797
798 #ifdef CONFIG_AUDITSYSCALL
xfrm_audit_start(const char * op)799 static inline struct audit_buffer *xfrm_audit_start(const char *op)
800 {
801 struct audit_buffer *audit_buf = NULL;
802
803 if (audit_enabled == AUDIT_OFF)
804 return NULL;
805 audit_buf = audit_log_start(audit_context(), GFP_ATOMIC,
806 AUDIT_MAC_IPSEC_EVENT);
807 if (audit_buf == NULL)
808 return NULL;
809 audit_log_format(audit_buf, "op=%s", op);
810 return audit_buf;
811 }
812
/* Append the acting task's loginuid/session (or the invalid markers
 * when @task_valid is false) plus its LSM context to @audit_buf. */
static inline void xfrm_audit_helper_usrinfo(bool task_valid,
					     struct audit_buffer *audit_buf)
{
	unsigned int auid = from_kuid(&init_user_ns, INVALID_UID);
	unsigned int ses = AUDIT_SID_UNSET;

	if (task_valid) {
		auid = from_kuid(&init_user_ns, audit_get_loginuid(current));
		ses = audit_get_sessionid(current);
	}

	audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses);
	audit_log_task_context(audit_buf);
}
825
/* Audit helpers for policy/state lifecycle and receive-path failures. */
void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid);
void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      bool task_valid);
void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid);
void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid);
void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
				      struct sk_buff *skb);
void xfrm_audit_state_replay(struct xfrm_state *x, struct sk_buff *skb,
			     __be32 net_seq);
void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, __be32 net_spi,
			       __be32 net_seq);
void xfrm_audit_state_icvfail(struct xfrm_state *x, struct sk_buff *skb,
			      u8 proto);
840 #else
841
/* CONFIG_AUDITSYSCALL=n: auditing compiled out, all hooks are no-ops. */
static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
					 bool task_valid)
{
}

static inline void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
					    bool task_valid)
{
}

static inline void xfrm_audit_state_add(struct xfrm_state *x, int result,
					bool task_valid)
{
}

static inline void xfrm_audit_state_delete(struct xfrm_state *x, int result,
					   bool task_valid)
{
}

static inline void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
						    struct sk_buff *skb)
{
}

static inline void xfrm_audit_state_replay(struct xfrm_state *x,
					   struct sk_buff *skb, __be32 net_seq)
{
}

static inline void xfrm_audit_state_notfound_simple(struct sk_buff *skb,
						    u16 family)
{
}

static inline void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
					     __be32 net_spi, __be32 net_seq)
{
}

static inline void xfrm_audit_state_icvfail(struct xfrm_state *x,
					    struct sk_buff *skb, u8 proto)
{
}
886 #endif /* CONFIG_AUDITSYSCALL */
887
xfrm_pol_hold(struct xfrm_policy * policy)888 static inline void xfrm_pol_hold(struct xfrm_policy *policy)
889 {
890 if (likely(policy != NULL))
891 refcount_inc(&policy->refcnt);
892 }
893
894 void xfrm_policy_destroy(struct xfrm_policy *policy);
895
xfrm_pol_put(struct xfrm_policy * policy)896 static inline void xfrm_pol_put(struct xfrm_policy *policy)
897 {
898 if (refcount_dec_and_test(&policy->refcnt))
899 xfrm_policy_destroy(policy);
900 }
901
/* Drop one reference on each of the @npols policies in @pols,
 * last-to-first (matching the original iteration order). */
static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
{
	while (npols > 0)
		xfrm_pol_put(pols[--npols]);
}
908
void __xfrm_state_destroy(struct xfrm_state *, bool);

/* Drop a reference without triggering destruction; compare
 * xfrm_state_put(), which destroys on the final put. */
static inline void __xfrm_state_put(struct xfrm_state *x)
{
	refcount_dec(&x->refcnt);
}
915
/* Release a reference; the final put destroys @x (non-sync flavour,
 * passes sync=false to __xfrm_state_destroy()). */
static inline void xfrm_state_put(struct xfrm_state *x)
{
	if (refcount_dec_and_test(&x->refcnt))
		__xfrm_state_destroy(x, false);
}
921
/* As xfrm_state_put(), but passes sync=true to __xfrm_state_destroy(). */
static inline void xfrm_state_put_sync(struct xfrm_state *x)
{
	if (refcount_dec_and_test(&x->refcnt))
		__xfrm_state_destroy(x, true);
}
927
/* Take a reference on @x; caller must already hold one. */
static inline void xfrm_state_hold(struct xfrm_state *x)
{
	refcount_inc(&x->refcnt);
}
932
addr_match(const void * token1,const void * token2,unsigned int prefixlen)933 static inline bool addr_match(const void *token1, const void *token2,
934 unsigned int prefixlen)
935 {
936 const __be32 *a1 = token1;
937 const __be32 *a2 = token2;
938 unsigned int pdw;
939 unsigned int pbi;
940
941 pdw = prefixlen >> 5; /* num of whole u32 in prefix */
942 pbi = prefixlen & 0x1f; /* num of bits in incomplete u32 in prefix */
943
944 if (pdw)
945 if (memcmp(a1, a2, pdw << 2))
946 return false;
947
948 if (pbi) {
949 __be32 mask;
950
951 mask = htonl((0xffffffff) << (32 - pbi));
952
953 if ((a1[pdw] ^ a2[pdw]) & mask)
954 return false;
955 }
956
957 return true;
958 }
959
/* Compare two IPv4 addresses under a prefix of @prefixlen (0..32) bits. */
static inline bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen)
{
	/* C99 6.5.7 (3): u32 << 32 is undefined behaviour */
	/* (only reachable when long is 32-bit; on 64-bit ~0UL is 64 bits
	 * wide and the shift below is always defined) */
	if (sizeof(long) == 4 && prefixlen == 0)
		return true;
	return !((a1 ^ a2) & htonl(~0UL << (32 - prefixlen)));
}
967
/* Extract the "source port" selector key from the flow's upper-layer
 * info: real port for TCP/UDP/UDPLITE/SCTP, message type for
 * ICMP/ICMPv6/MH, and the upper 16 bits of the key for GRE. */
static __inline__
__be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli)
{
	__be16 port;
	switch(fl->flowi_proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
	case IPPROTO_SCTP:
		port = uli->ports.sport;
		break;
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		port = htons(uli->icmpt.type);
		break;
	case IPPROTO_MH:
		port = htons(uli->mht.type);
		break;
	case IPPROTO_GRE:
		port = htons(ntohl(uli->gre_key) >> 16);
		break;
	default:
		port = 0;	/*XXX*/
	}
	return port;
}
994
/* Extract the "destination port" selector key: real port for
 * TCP/UDP/UDPLITE/SCTP, message code for ICMP/ICMPv6, and the lower
 * 16 bits of the key for GRE. (MH carries only a type, see the
 * sport helper above, so it falls through to 0 here.) */
static __inline__
__be16 xfrm_flowi_dport(const struct flowi *fl, const union flowi_uli *uli)
{
	__be16 port;
	switch(fl->flowi_proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
	case IPPROTO_SCTP:
		port = uli->ports.dport;
		break;
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		port = htons(uli->icmpt.code);
		break;
	case IPPROTO_GRE:
		port = htons(ntohl(uli->gre_key) & 0xffff);
		break;
	default:
		port = 0;	/*XXX*/
	}
	return port;
}

bool xfrm_selector_match(const struct xfrm_selector *sel,
			 const struct flowi *fl, unsigned short family);
1021
#ifdef CONFIG_SECURITY_NETWORK_XFRM
/* If neither has a context --> match
 * Otherwise, both must have a context and the sids, doi, alg must match
 */
static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
{
	return ((!s1 && !s2) ||
		(s1 && s2 &&
		 (s1->ctx_sid == s2->ctx_sid) &&
		 (s1->ctx_doi == s2->ctx_doi) &&
		 (s1->ctx_alg == s2->ctx_alg)));
}
#else
/* LSM support compiled out: every pair of contexts matches. */
static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
{
	return true;
}
#endif
1040
1041 /* A struct encoding bundle of transformations to apply to some set of flow.
1042 *
1043 * xdst->child points to the next element of bundle.
1044 * dst->xfrm points to an instanse of transformer.
1045 *
1046 * Due to unfortunate limitations of current routing cache, which we
1047 * have no time to fix, it mirrors struct rtable and bound to the same
1048 * routing key, including saddr,daddr. However, we can have many of
1049 * bundles differing by session id. All the bundles grow from a parent
1050 * policy rule.
1051 */
struct xfrm_dst {
	union {
		struct dst_entry dst;
		struct rtable rt;
		struct rt6_info rt6;
	} u;
	struct dst_entry *route;	/* underlying route; released in xfrm_dst_destroy() */
	struct dst_entry *child;	/* next element of the bundle */
	struct dst_entry *path;		/* final non-xfrm dst of the bundle */
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];	/* policies this bundle was built from */
	int num_pols, num_xfrms;	/* valid entries in pols[] / transforms in the chain */
	u32 xfrm_genid;			/* state generation id captured at build time */
	u32 policy_genid;		/* policy generation id captured at build time */
	u32 route_mtu_cached;		/* cached MTU of @route */
	u32 child_mtu_cached;		/* cached MTU of @child */
	u32 route_cookie;		/* staleness cookie for @route */
	u32 path_cookie;		/* staleness cookie for @path */
};
1070
/* Return the final, non-xfrm dst at the end of a bundle; for a plain
 * (non-xfrm) dst, that is the dst itself.
 */
static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
{
#ifdef CONFIG_XFRM
	const struct xfrm_dst *xdst;

	if (!dst->xfrm && !(dst->flags & DST_XFRM_QUEUE))
		return (struct dst_entry *)dst;

	xdst = (const struct xfrm_dst *)dst;
	return xdst->path;
#else
	return (struct dst_entry *)dst;
#endif
}
1082
xfrm_dst_child(const struct dst_entry * dst)1083 static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
1084 {
1085 #ifdef CONFIG_XFRM
1086 if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
1087 struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
1088 return xdst->child;
1089 }
1090 #endif
1091 return NULL;
1092 }
1093
1094 #ifdef CONFIG_XFRM
/* Attach @child as the next element of the bundle rooted at @xdst. */
static inline void xfrm_dst_set_child(struct xfrm_dst *xdst, struct dst_entry *child)
{
	xdst->child = child;
}
1099
/* Drop everything an xfrm_dst holds: the policy references, the
 * underlying route, and the attached xfrm state (if any).
 */
static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
{
	xfrm_pols_put(xdst->pols, xdst->num_pols);
	dst_release(xdst->route);
	if (likely(xdst->u.dst.xfrm))
		xfrm_state_put(xdst->u.dst.xfrm);
}
1107 #endif
1108
1109 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
1110
/* Configuration of an xfrm virtual interface. */
struct xfrm_if_parms {
	int link;		/* ifindex of underlying L2 interface */
	u32 if_id;		/* interface identifier */
	bool collect_md;	/* collect tunnel metadata mode */
};

/* An xfrm virtual interface. */
struct xfrm_if {
	struct xfrm_if __rcu *next;	/* next interface in list */
	struct net_device *dev;	/* virtual device associated with interface */
	struct net *net;	/* netns for packet i/o */
	struct xfrm_if_parms p;	/* interface parms */

	struct gro_cells gro_cells;	/* per-cpu GRO receive queues */
};
1125
/* Per-skb crypto/packet offload state; stored in sec_path.ovec[]. */
struct xfrm_offload {
	/* Output sequence number for replay protection on offloading. */
	struct {
		__u32 low;
		__u32 hi;
	} seq;

	/* Offload processing flags (bit values below). */
	__u32 flags;
#define	SA_DELETE_REQ		1
#define	CRYPTO_DONE		2
#define	CRYPTO_NEXT_DONE	4
#define	CRYPTO_FALLBACK		8
#define	XFRM_GSO_SEGMENT	16
#define	XFRM_GRO		32
/* 64 is free */
#define	XFRM_DEV_RESUME		128
#define	XFRM_XMIT		256

	/* Result of the crypto operation (bit values below). */
	__u32 status;
#define CRYPTO_SUCCESS				1
#define CRYPTO_GENERIC_ERROR			2
#define CRYPTO_TRANSPORT_AH_AUTH_FAILED		4
#define CRYPTO_TRANSPORT_ESP_AUTH_FAILED	8
#define CRYPTO_TUNNEL_AH_AUTH_FAILED		16
#define CRYPTO_TUNNEL_ESP_AUTH_FAILED		32
#define CRYPTO_INVALID_PACKET_SYNTAX		64
#define CRYPTO_INVALID_PROTOCOL			128

	/* Used to keep whole l2 header for transport mode GRO */
	__u32 orig_mac_len;

	__u8 proto;		/* IPsec protocol of this entry */
	__u8 inner_ipproto;	/* protocol of the inner payload */
};
1160
/* Record of the xfrm states an skb has been processed through. */
struct sec_path {
	int len;		/* number of valid entries in xvec[] */
	int olen;		/* number of valid entries in ovec[] */
	int verified_cnt;	/* NOTE(review): appears to count entries already
				 * verified by the policy check — confirm against
				 * callers */

	struct xfrm_state *xvec[XFRM_MAX_DEPTH];
	struct xfrm_offload ovec[XFRM_MAX_OFFLOAD_DEPTH];
};
1169
1170 struct sec_path *secpath_set(struct sk_buff *skb);
1171
/* Drop the skb's security-path extension, if present. */
static inline void
secpath_reset(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	skb_ext_del(skb, SKB_EXT_SEC_PATH);
#endif
}
1179
1180 static inline int
xfrm_addr_any(const xfrm_address_t * addr,unsigned short family)1181 xfrm_addr_any(const xfrm_address_t *addr, unsigned short family)
1182 {
1183 switch (family) {
1184 case AF_INET:
1185 return addr->a4 == 0;
1186 case AF_INET6:
1187 return ipv6_addr_any(&addr->in6);
1188 }
1189 return 0;
1190 }
1191
1192 static inline int
__xfrm4_state_addr_cmp(const struct xfrm_tmpl * tmpl,const struct xfrm_state * x)1193 __xfrm4_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
1194 {
1195 return (tmpl->saddr.a4 &&
1196 tmpl->saddr.a4 != x->props.saddr.a4);
1197 }
1198
1199 static inline int
__xfrm6_state_addr_cmp(const struct xfrm_tmpl * tmpl,const struct xfrm_state * x)1200 __xfrm6_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
1201 {
1202 return (!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
1203 !ipv6_addr_equal((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
1204 }
1205
1206 static inline int
xfrm_state_addr_cmp(const struct xfrm_tmpl * tmpl,const struct xfrm_state * x,unsigned short family)1207 xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, unsigned short family)
1208 {
1209 switch (family) {
1210 case AF_INET:
1211 return __xfrm4_state_addr_cmp(tmpl, x);
1212 case AF_INET6:
1213 return __xfrm6_state_addr_cmp(tmpl, x);
1214 }
1215 return !0;
1216 }
1217
1218 #ifdef CONFIG_XFRM
/* Return the most recently added state on the skb's security path.
 * The caller must ensure a secpath with at least one entry exists.
 */
static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
{
	struct sec_path *sp = skb_sec_path(skb);

	return sp->xvec[sp->len - 1];
}
1225 #endif
1226
xfrm_offload(struct sk_buff * skb)1227 static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
1228 {
1229 #ifdef CONFIG_XFRM
1230 struct sec_path *sp = skb_sec_path(skb);
1231
1232 if (!sp || !sp->olen || sp->len != sp->olen)
1233 return NULL;
1234
1235 return &sp->ovec[sp->olen - 1];
1236 #else
1237 return NULL;
1238 #endif
1239 }
1240
1241 #ifdef CONFIG_XFRM
1242 int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
1243 unsigned short family);
1244
__xfrm_check_nopolicy(struct net * net,struct sk_buff * skb,int dir)1245 static inline bool __xfrm_check_nopolicy(struct net *net, struct sk_buff *skb,
1246 int dir)
1247 {
1248 if (!net->xfrm.policy_count[dir] && !secpath_exists(skb))
1249 return net->xfrm.policy_default[dir] == XFRM_USERPOLICY_ACCEPT;
1250
1251 return false;
1252 }
1253
__xfrm_check_dev_nopolicy(struct sk_buff * skb,int dir,unsigned short family)1254 static inline bool __xfrm_check_dev_nopolicy(struct sk_buff *skb,
1255 int dir, unsigned short family)
1256 {
1257 if (dir != XFRM_POLICY_OUT && family == AF_INET) {
1258 /* same dst may be used for traffic originating from
1259 * devices with different policy settings.
1260 */
1261 return IPCB(skb)->flags & IPSKB_NOPOLICY;
1262 }
1263 return skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY);
1264 }
1265
/* Core inbound policy check. @reverse selects a reversed-flow lookup
 * (encoded into the direction above XFRM_POLICY_MASK). Returns non-zero
 * when the packet is allowed.
 */
static inline int __xfrm_policy_check2(struct sock *sk, int dir,
				       struct sk_buff *skb,
				       unsigned int family, int reverse)
{
	struct net *net = dev_net(skb->dev);
	int ndir = dir | (reverse ? XFRM_POLICY_MASK + 1 : 0);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct xfrm_state *x;

	/* A per-socket input policy always takes the full check path. */
	if (sk && sk->sk_policy[XFRM_POLICY_IN])
		return __xfrm_policy_check(sk, ndir, skb, family);

	if (xo) {
		x = xfrm_input_state(skb);
		if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
			bool check = (xo->flags & CRYPTO_DONE) &&
				     (xo->status & CRYPTO_SUCCESS);

			/* The packets here are plain ones and the secpath
			 * was only needed to indicate that the hardware
			 * already handled them; nothing more needs doing.
			 *
			 * Consume the secpath which was set by drivers.
			 */
			secpath_reset(skb);
			return check;
		}
	}

	/* Accept if no policy applies, the device/dst is policy-exempt,
	 * or the full policy check passes.
	 */
	return __xfrm_check_nopolicy(net, skb, dir) ||
	       __xfrm_check_dev_nopolicy(skb, dir, family) ||
	       __xfrm_policy_check(sk, ndir, skb, family);
}
1299
/* Forward-direction policy check, family-generic. */
static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
{
	return __xfrm_policy_check2(sk, dir, skb, family, 0);
}

/* IPv4 forward-direction policy check. */
static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
{
	return xfrm_policy_check(sk, dir, skb, AF_INET);
}

/* IPv6 forward-direction policy check. */
static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
{
	return xfrm_policy_check(sk, dir, skb, AF_INET6);
}

/* IPv4 policy check with the flow direction reversed. */
static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
					     struct sk_buff *skb)
{
	return __xfrm_policy_check2(sk, dir, skb, AF_INET, 1);
}

/* IPv6 policy check with the flow direction reversed. */
static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
					     struct sk_buff *skb)
{
	return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
}
1326
1327 int __xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl,
1328 unsigned int family, int reverse);
1329
/* Decode the skb into a flowi for policy lookup (forward direction). */
static inline int xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl,
				      unsigned int family)
{
	return __xfrm_decode_session(net, skb, fl, family, 0);
}

/* Decode the skb into a flowi with source/destination swapped. */
static inline int xfrm_decode_session_reverse(struct net *net, struct sk_buff *skb,
					      struct flowi *fl,
					      unsigned int family)
{
	return __xfrm_decode_session(net, skb, fl, family, 1);
}
1342
1343 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
1344
xfrm_route_forward(struct sk_buff * skb,unsigned short family)1345 static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
1346 {
1347 struct net *net = dev_net(skb->dev);
1348
1349 if (!net->xfrm.policy_count[XFRM_POLICY_OUT] &&
1350 net->xfrm.policy_default[XFRM_POLICY_OUT] == XFRM_USERPOLICY_ACCEPT)
1351 return true;
1352
1353 return (skb_dst(skb)->flags & DST_NOXFRM) ||
1354 __xfrm_route_forward(skb, family);
1355 }
1356
/* IPv4 convenience wrapper for xfrm_route_forward(). */
static inline int xfrm4_route_forward(struct sk_buff *skb)
{
	return xfrm_route_forward(skb, AF_INET);
}

/* IPv6 convenience wrapper for xfrm_route_forward(). */
static inline int xfrm6_route_forward(struct sk_buff *skb)
{
	return xfrm_route_forward(skb, AF_INET6);
}
1366
1367 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk);
1368
/* Clone the parent socket's per-socket policies onto a new child socket.
 * Returns 0 on success or the error from __xfrm_sk_clone_policy().
 */
static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
{
	if (!sk_fullsock(osk))
		return 0;
	/* Clear first so the child never aliases the parent's pointers. */
	sk->sk_policy[0] = NULL;
	sk->sk_policy[1] = NULL;
	/* Most sockets carry no policies; clone only when needed. */
	if (unlikely(osk->sk_policy[0] || osk->sk_policy[1]))
		return __xfrm_sk_clone_policy(sk, osk);
	return 0;
}
1379
1380 int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
1381
xfrm_sk_free_policy(struct sock * sk)1382 static inline void xfrm_sk_free_policy(struct sock *sk)
1383 {
1384 struct xfrm_policy *pol;
1385
1386 pol = rcu_dereference_protected(sk->sk_policy[0], 1);
1387 if (unlikely(pol != NULL)) {
1388 xfrm_policy_delete(pol, XFRM_POLICY_MAX);
1389 sk->sk_policy[0] = NULL;
1390 }
1391 pol = rcu_dereference_protected(sk->sk_policy[1], 1);
1392 if (unlikely(pol != NULL)) {
1393 xfrm_policy_delete(pol, XFRM_POLICY_MAX+1);
1394 sk->sk_policy[1] = NULL;
1395 }
1396 }
1397
1398 #else
1399
/* CONFIG_XFRM disabled: policy checks trivially pass and per-socket
 * policies do not exist.
 */
static inline void xfrm_sk_free_policy(struct sock *sk) {}
static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) { return 0; }
static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
{
	return 1;
}
static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
{
	return 1;
}
static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
{
	return 1;
}
/* Session decoding is unavailable without XFRM support. */
static inline int xfrm_decode_session_reverse(struct net *net, struct sk_buff *skb,
					      struct flowi *fl,
					      unsigned int family)
{
	return -ENOSYS;
}
static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
					     struct sk_buff *skb)
{
	return 1;
}
static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
					     struct sk_buff *skb)
{
	return 1;
}
1432 #endif
1433
1434 static __inline__
xfrm_flowi_daddr(const struct flowi * fl,unsigned short family)1435 xfrm_address_t *xfrm_flowi_daddr(const struct flowi *fl, unsigned short family)
1436 {
1437 switch (family){
1438 case AF_INET:
1439 return (xfrm_address_t *)&fl->u.ip4.daddr;
1440 case AF_INET6:
1441 return (xfrm_address_t *)&fl->u.ip6.daddr;
1442 }
1443 return NULL;
1444 }
1445
1446 static __inline__
xfrm_flowi_saddr(const struct flowi * fl,unsigned short family)1447 xfrm_address_t *xfrm_flowi_saddr(const struct flowi *fl, unsigned short family)
1448 {
1449 switch (family){
1450 case AF_INET:
1451 return (xfrm_address_t *)&fl->u.ip4.saddr;
1452 case AF_INET6:
1453 return (xfrm_address_t *)&fl->u.ip6.saddr;
1454 }
1455 return NULL;
1456 }
1457
1458 static __inline__
xfrm_flowi_addr_get(const struct flowi * fl,xfrm_address_t * saddr,xfrm_address_t * daddr,unsigned short family)1459 void xfrm_flowi_addr_get(const struct flowi *fl,
1460 xfrm_address_t *saddr, xfrm_address_t *daddr,
1461 unsigned short family)
1462 {
1463 switch(family) {
1464 case AF_INET:
1465 memcpy(&saddr->a4, &fl->u.ip4.saddr, sizeof(saddr->a4));
1466 memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
1467 break;
1468 case AF_INET6:
1469 saddr->in6 = fl->u.ip6.saddr;
1470 daddr->in6 = fl->u.ip6.daddr;
1471 break;
1472 }
1473 }
1474
1475 static __inline__ int
__xfrm4_state_addr_check(const struct xfrm_state * x,const xfrm_address_t * daddr,const xfrm_address_t * saddr)1476 __xfrm4_state_addr_check(const struct xfrm_state *x,
1477 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
1478 {
1479 if (daddr->a4 == x->id.daddr.a4 &&
1480 (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
1481 return 1;
1482 return 0;
1483 }
1484
1485 static __inline__ int
__xfrm6_state_addr_check(const struct xfrm_state * x,const xfrm_address_t * daddr,const xfrm_address_t * saddr)1486 __xfrm6_state_addr_check(const struct xfrm_state *x,
1487 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
1488 {
1489 if (ipv6_addr_equal((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
1490 (ipv6_addr_equal((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr) ||
1491 ipv6_addr_any((struct in6_addr *)saddr) ||
1492 ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
1493 return 1;
1494 return 0;
1495 }
1496
1497 static __inline__ int
xfrm_state_addr_check(const struct xfrm_state * x,const xfrm_address_t * daddr,const xfrm_address_t * saddr,unsigned short family)1498 xfrm_state_addr_check(const struct xfrm_state *x,
1499 const xfrm_address_t *daddr, const xfrm_address_t *saddr,
1500 unsigned short family)
1501 {
1502 switch (family) {
1503 case AF_INET:
1504 return __xfrm4_state_addr_check(x, daddr, saddr);
1505 case AF_INET6:
1506 return __xfrm6_state_addr_check(x, daddr, saddr);
1507 }
1508 return 0;
1509 }
1510
/* Like xfrm_state_addr_check() but takes the addresses straight from a
 * flowi of the given family. Returns 1 on match, 0 otherwise (including
 * unknown families).
 */
static __inline__ int
xfrm_state_addr_flow_check(const struct xfrm_state *x, const struct flowi *fl,
			   unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_state_addr_check(x,
						(const xfrm_address_t *)&fl->u.ip4.daddr,
						(const xfrm_address_t *)&fl->u.ip4.saddr);
	case AF_INET6:
		return __xfrm6_state_addr_check(x,
						(const xfrm_address_t *)&fl->u.ip6.daddr,
						(const xfrm_address_t *)&fl->u.ip6.saddr);
	}
	return 0;
}
1527
/* Non-zero when the state is referenced by tunnel users, i.e. it is
 * owned by kernel-internal tunnel code.
 */
static inline int xfrm_state_kern(const struct xfrm_state *x)
{
	return atomic_read(&x->tunnel_users);
}
1532
/* Protocols that are valid in an xfrm id. The IPv6 extension-header
 * protocols are accepted only when IPv6 support is enabled.
 */
static inline bool xfrm_id_proto_valid(u8 proto)
{
	switch (proto) {
	case IPPROTO_AH:
	case IPPROTO_ESP:
	case IPPROTO_COMP:
#if IS_ENABLED(CONFIG_IPV6)
	case IPPROTO_ROUTING:
	case IPPROTO_DSTOPTS:
#endif
		return true;
	default:
		return false;
	}
}
1548
1549 /* IPSEC_PROTO_ANY only matches 3 IPsec protocols, 0 could match all. */
/* Match @proto against a userspace selector: 0 matches everything,
 * IPSEC_PROTO_ANY matches only the three IPsec protocols, anything else
 * must be an exact match.
 */
static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
{
	if (!userproto || proto == userproto)
		return 1;
	return userproto == IPSEC_PROTO_ANY &&
	       (proto == IPPROTO_AH || proto == IPPROTO_ESP ||
		proto == IPPROTO_COMP);
}
1557
1558 /*
1559 * xfrm algorithm information
1560 */
struct xfrm_algo_aead_info {
	char *geniv;		/* IV generator name */
	u16 icv_truncbits;	/* truncated ICV length, in bits */
};

struct xfrm_algo_auth_info {
	u16 icv_truncbits;	/* truncated ICV length, in bits */
	u16 icv_fullbits;	/* full ICV length, in bits */
};

struct xfrm_algo_encr_info {
	char *geniv;		/* IV generator name */
	u16 blockbits;		/* cipher block size, in bits */
	u16 defkeybits;		/* default key length, in bits */
};

struct xfrm_algo_comp_info {
	u16 threshold;		/* compression threshold */
};

/* Descriptor tying an algorithm name to its properties and its PF_KEY
 * (sadb) description.
 */
struct xfrm_algo_desc {
	char *name;		/* canonical algorithm name */
	char *compat;		/* legacy/compat name, if any */
	u8 available:1;		/* a backing implementation is present */
	u8 pfkey_supported:1;	/* exposed via the PF_KEY interface */
	union {
		struct xfrm_algo_aead_info aead;
		struct xfrm_algo_auth_info auth;
		struct xfrm_algo_encr_info encr;
		struct xfrm_algo_comp_info comp;
	} uinfo;
	struct sadb_alg desc;
};
1594
1595 /* XFRM protocol handlers. */
/* IPv4 per-protocol handler, kept in a priority-ordered RCU list. */
struct xfrm4_protocol {
	int (*handler)(struct sk_buff *skb);
	int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
			     int encap_type);
	int (*cb_handler)(struct sk_buff *skb, int err);
	int (*err_handler)(struct sk_buff *skb, u32 info);

	struct xfrm4_protocol __rcu *next;	/* next handler in list */
	int priority;				/* list ordering key */
};

/* IPv6 per-protocol handler, kept in a priority-ordered RCU list. */
struct xfrm6_protocol {
	int (*handler)(struct sk_buff *skb);
	int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
			     int encap_type);
	int (*cb_handler)(struct sk_buff *skb, int err);
	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
			   u8 type, u8 code, int offset, __be32 info);

	struct xfrm6_protocol __rcu *next;	/* next handler in list */
	int priority;				/* list ordering key */
};

/* XFRM tunnel handlers. */
struct xfrm_tunnel {
	int (*handler)(struct sk_buff *skb);
	int (*cb_handler)(struct sk_buff *skb, int err);
	int (*err_handler)(struct sk_buff *skb, u32 info);

	struct xfrm_tunnel __rcu *next;	/* next handler in list */
	int priority;			/* list ordering key */
};

struct xfrm6_tunnel {
	int (*handler)(struct sk_buff *skb);
	int (*cb_handler)(struct sk_buff *skb, int err);
	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
			   u8 type, u8 code, int offset, __be32 info);
	struct xfrm6_tunnel __rcu *next;	/* next handler in list */
	int priority;				/* list ordering key */
};
1637
1638 void xfrm_init(void);
1639 void xfrm4_init(void);
1640 int xfrm_state_init(struct net *net);
1641 void xfrm_state_fini(struct net *net);
1642 void xfrm4_state_init(void);
1643 void xfrm4_protocol_init(void);
1644 #ifdef CONFIG_XFRM
1645 int xfrm6_init(void);
1646 void xfrm6_fini(void);
1647 int xfrm6_state_init(void);
1648 void xfrm6_state_fini(void);
1649 int xfrm6_protocol_init(void);
1650 void xfrm6_protocol_fini(void);
1651 #else
/* IPv6 XFRM disabled: init succeeds trivially, fini does nothing. */
static inline int xfrm6_init(void)
{
	return 0;
}
static inline void xfrm6_fini(void)
{
}
1660 #endif
1661
1662 #ifdef CONFIG_XFRM_STATISTICS
1663 int xfrm_proc_init(struct net *net);
1664 void xfrm_proc_fini(struct net *net);
1665 #endif
1666
1667 int xfrm_sysctl_init(struct net *net);
1668 #ifdef CONFIG_SYSCTL
1669 void xfrm_sysctl_fini(struct net *net);
1670 #else
/* No sysctl support: nothing to tear down. */
static inline void xfrm_sysctl_fini(struct net *net)
{
}
1674 #endif
1675
1676 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
1677 struct xfrm_address_filter *filter);
1678 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
1679 int (*func)(struct xfrm_state *, int, void*), void *);
1680 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
1681 struct xfrm_state *xfrm_state_alloc(struct net *net);
1682 void xfrm_state_free(struct xfrm_state *x);
1683 struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
1684 const xfrm_address_t *saddr,
1685 const struct flowi *fl,
1686 struct xfrm_tmpl *tmpl,
1687 struct xfrm_policy *pol, int *err,
1688 unsigned short family, u32 if_id);
1689 struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
1690 xfrm_address_t *daddr,
1691 xfrm_address_t *saddr,
1692 unsigned short family,
1693 u8 mode, u8 proto, u32 reqid);
1694 struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
1695 unsigned short family);
1696 int xfrm_state_check_expire(struct xfrm_state *x);
1697 void xfrm_state_update_stats(struct net *net);
1698 #ifdef CONFIG_XFRM_OFFLOAD
xfrm_dev_state_update_stats(struct xfrm_state * x)1699 static inline void xfrm_dev_state_update_stats(struct xfrm_state *x)
1700 {
1701 struct xfrm_dev_offload *xdo = &x->xso;
1702 struct net_device *dev = READ_ONCE(xdo->dev);
1703
1704 if (dev && dev->xfrmdev_ops &&
1705 dev->xfrmdev_ops->xdo_dev_state_update_stats)
1706 dev->xfrmdev_ops->xdo_dev_state_update_stats(x);
1707
1708 }
1709 #else
/* No offload support built in: nothing to update. */
static inline void xfrm_dev_state_update_stats(struct xfrm_state *x) {}
1711 #endif
1712 void xfrm_state_insert(struct xfrm_state *x);
1713 int xfrm_state_add(struct xfrm_state *x);
1714 int xfrm_state_update(struct xfrm_state *x);
1715 struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
1716 const xfrm_address_t *daddr, __be32 spi,
1717 u8 proto, unsigned short family);
1718 struct xfrm_state *xfrm_input_state_lookup(struct net *net, u32 mark,
1719 const xfrm_address_t *daddr,
1720 __be32 spi, u8 proto,
1721 unsigned short family);
1722 struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
1723 const xfrm_address_t *daddr,
1724 const xfrm_address_t *saddr,
1725 u8 proto,
1726 unsigned short family);
1727 #ifdef CONFIG_XFRM_SUB_POLICY
1728 void xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1729 unsigned short family);
1730 void xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1731 unsigned short family);
1732 #else
/* Without CONFIG_XFRM_SUB_POLICY, template/state sorting is a no-op. */
static inline void xfrm_tmpl_sort(struct xfrm_tmpl **d, struct xfrm_tmpl **s,
				  int n, unsigned short family)
{
}

static inline void xfrm_state_sort(struct xfrm_state **d, struct xfrm_state **s,
				   int n, unsigned short family)
{
}
1742 #endif
1743
/* SAD (state database) statistics, filled by xfrm_sad_getinfo(). */
struct xfrmk_sadinfo {
	u32 sadhcnt; /* current hash bkts */
	u32 sadhmcnt; /* max allowed hash bkts */
	u32 sadcnt; /* current running count */
};

/* SPD (policy database) statistics, filled by xfrm_spd_getinfo().
 * NOTE(review): the *scnt fields presumably count per-socket policies —
 * confirm against xfrm_spd_getinfo().
 */
struct xfrmk_spdinfo {
	u32 incnt;	/* input policies */
	u32 outcnt;	/* output policies */
	u32 fwdcnt;	/* forward policies */
	u32 inscnt;
	u32 outscnt;
	u32 fwdscnt;
	u32 spdhcnt;	/* current hash buckets */
	u32 spdhmcnt;	/* maximum hash buckets */
};
1760
1761 struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num);
1762 int xfrm_state_delete(struct xfrm_state *x);
1763 int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
1764 int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
1765 int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
1766 bool task_valid);
1767 void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
1768 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
1769 u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
1770 int xfrm_init_replay(struct xfrm_state *x, struct netlink_ext_ack *extack);
1771 u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
1772 int __xfrm_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack);
1773 int xfrm_init_state(struct xfrm_state *x);
1774 int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
1775 int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
1776 int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
1777 int (*finish)(struct net *, struct sock *,
1778 struct sk_buff *));
1779 int xfrm_trans_queue(struct sk_buff *skb,
1780 int (*finish)(struct net *, struct sock *,
1781 struct sk_buff *));
1782 int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err);
1783 int xfrm_output(struct sock *sk, struct sk_buff *skb);
1784 int xfrm4_tunnel_check_size(struct sk_buff *skb);
1785 #if IS_ENABLED(CONFIG_IPV6)
1786 int xfrm6_tunnel_check_size(struct sk_buff *skb);
1787 #else
/* IPv6 disabled: tunnel size checks cannot succeed. */
static inline int xfrm6_tunnel_check_size(struct sk_buff *skb)
{
	return -EMSGSIZE;
}
1792 #endif
1793
1794 #if IS_ENABLED(CONFIG_NET_PKTGEN)
1795 int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb);
1796 #endif
1797
1798 void xfrm_local_error(struct sk_buff *skb, int mtu);
1799 int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
1800 int encap_type);
1801 int xfrm4_transport_finish(struct sk_buff *skb, int async);
1802 int xfrm4_rcv(struct sk_buff *skb);
1803
/* Prime the skb control blocks for IPv4 (no tunnel context, daddr at
 * its IPv4 header offset) and hand off to xfrm_input() with no
 * encapsulation type.
 */
static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
{
	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
	return xfrm_input(skb, nexthdr, spi, 0);
}
1811
1812 int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb);
1813 int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
1814 int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol);
1815 int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
1816 int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
1817 void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
1818 int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
1819 struct ip6_tnl *t);
1820 int xfrm6_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
1821 int encap_type);
1822 int xfrm6_transport_finish(struct sk_buff *skb, int async);
1823 int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t);
1824 int xfrm6_rcv(struct sk_buff *skb);
1825 int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
1826 xfrm_address_t *saddr, u8 proto);
1827 void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
1828 int xfrm6_protocol_register(struct xfrm6_protocol *handler, unsigned char protocol);
1829 int xfrm6_protocol_deregister(struct xfrm6_protocol *handler, unsigned char protocol);
1830 int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
1831 int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
1832 __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
1833 __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
1834 int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
1835
1836 #ifdef CONFIG_XFRM
1837 void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu);
1838 int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
1839 int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
1840 struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
1841 struct sk_buff *skb);
1842 struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
1843 struct sk_buff *skb);
1844 int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval,
1845 int optlen);
1846 #else
/* CONFIG_XFRM disabled: the IPsec policy socket options do not exist. */
static inline int xfrm_user_policy(struct sock *sk, int optname,
				   sockptr_t optval, int optlen)
{
	return -ENOPROTOOPT;
}
1852 #endif
1853
1854 struct dst_entry *__xfrm_dst_lookup(int family, const struct xfrm_dst_lookup_params *params);
1855
1856 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);
1857
1858 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
1859 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1860 int (*func)(struct xfrm_policy *, int, int, void*),
1861 void *);
1862 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
1863 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
1864 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net,
1865 const struct xfrm_mark *mark,
1866 u32 if_id, u8 type, int dir,
1867 struct xfrm_selector *sel,
1868 struct xfrm_sec_ctx *ctx, int delete,
1869 int *err);
1870 struct xfrm_policy *xfrm_policy_byid(struct net *net,
1871 const struct xfrm_mark *mark, u32 if_id,
1872 u8 type, int dir, u32 id, int delete,
1873 int *err);
1874 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
1875 void xfrm_policy_hash_rebuild(struct net *net);
1876 u32 xfrm_get_acqseq(void);
1877 int verify_spi_info(u8 proto, u32 min, u32 max, struct netlink_ext_ack *extack);
1878 int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi,
1879 struct netlink_ext_ack *extack);
1880 struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
1881 u8 mode, u32 reqid, u32 if_id, u32 pcpu_num, u8 proto,
1882 const xfrm_address_t *daddr,
1883 const xfrm_address_t *saddr, int create,
1884 unsigned short family);
1885 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
1886
1887 #ifdef CONFIG_XFRM_MIGRATE
1888 int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1889 const struct xfrm_migrate *m, int num_bundles,
1890 const struct xfrm_kmaddress *k,
1891 const struct xfrm_encap_tmpl *encap);
1892 struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net,
1893 u32 if_id);
1894 struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
1895 struct xfrm_migrate *m,
1896 struct xfrm_encap_tmpl *encap);
1897 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1898 struct xfrm_migrate *m, int num_bundles,
1899 struct xfrm_kmaddress *k, struct net *net,
1900 struct xfrm_encap_tmpl *encap, u32 if_id,
1901 struct netlink_ext_ack *extack);
1902 #endif
1903
1904 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
1905 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid);
1906 int km_report(struct net *net, u8 proto, struct xfrm_selector *sel,
1907 xfrm_address_t *addr);
1908
1909 void xfrm_input_init(void);
1910 int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
1911
1912 void xfrm_probe_algs(void);
1913 int xfrm_count_pfkey_auth_supported(void);
1914 int xfrm_count_pfkey_enc_supported(void);
1915 struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
1916 struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
1917 struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
1918 struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
1919 struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
1920 struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
1921 struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
1922 struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
1923 struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
1924 int probe);
1925
xfrm6_addr_equal(const xfrm_address_t * a,const xfrm_address_t * b)1926 static inline bool xfrm6_addr_equal(const xfrm_address_t *a,
1927 const xfrm_address_t *b)
1928 {
1929 return ipv6_addr_equal((const struct in6_addr *)a,
1930 (const struct in6_addr *)b);
1931 }
1932
xfrm_addr_equal(const xfrm_address_t * a,const xfrm_address_t * b,sa_family_t family)1933 static inline bool xfrm_addr_equal(const xfrm_address_t *a,
1934 const xfrm_address_t *b,
1935 sa_family_t family)
1936 {
1937 switch (family) {
1938 default:
1939 case AF_INET:
1940 return ((__force u32)a->a4 ^ (__force u32)b->a4) == 0;
1941 case AF_INET6:
1942 return xfrm6_addr_equal(a, b);
1943 }
1944 }
1945
/* The low three bits of a policy index encode its direction. */
static inline int xfrm_policy_id2dir(u32 index)
{
	return index & 0x7;
}
1950
1951 #ifdef CONFIG_XFRM
1952 void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq);
1953 int xfrm_replay_check(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);
1954 void xfrm_replay_notify(struct xfrm_state *x, int event);
1955 int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb);
1956 int xfrm_replay_recheck(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);
1957
xfrm_aevent_is_on(struct net * net)1958 static inline int xfrm_aevent_is_on(struct net *net)
1959 {
1960 struct sock *nlsk;
1961 int ret = 0;
1962
1963 rcu_read_lock();
1964 nlsk = rcu_dereference(net->xfrm.nlsk);
1965 if (nlsk)
1966 ret = netlink_has_listeners(nlsk, XFRMNLGRP_AEVENTS);
1967 rcu_read_unlock();
1968 return ret;
1969 }
1970
xfrm_acquire_is_on(struct net * net)1971 static inline int xfrm_acquire_is_on(struct net *net)
1972 {
1973 struct sock *nlsk;
1974 int ret = 0;
1975
1976 rcu_read_lock();
1977 nlsk = rcu_dereference(net->xfrm.nlsk);
1978 if (nlsk)
1979 ret = netlink_has_listeners(nlsk, XFRMNLGRP_ACQUIRE);
1980 rcu_read_unlock();
1981
1982 return ret;
1983 }
1984 #endif
1985
aead_len(struct xfrm_algo_aead * alg)1986 static inline unsigned int aead_len(struct xfrm_algo_aead *alg)
1987 {
1988 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1989 }
1990
xfrm_alg_len(const struct xfrm_algo * alg)1991 static inline unsigned int xfrm_alg_len(const struct xfrm_algo *alg)
1992 {
1993 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1994 }
1995
xfrm_alg_auth_len(const struct xfrm_algo_auth * alg)1996 static inline unsigned int xfrm_alg_auth_len(const struct xfrm_algo_auth *alg)
1997 {
1998 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1999 }
2000
xfrm_replay_state_esn_len(struct xfrm_replay_state_esn * replay_esn)2001 static inline unsigned int xfrm_replay_state_esn_len(struct xfrm_replay_state_esn *replay_esn)
2002 {
2003 return sizeof(*replay_esn) + replay_esn->bmp_len * sizeof(__u32);
2004 }
2005
2006 #ifdef CONFIG_XFRM_MIGRATE
/* Duplicate the ESN replay state (both the live and the pending copy)
 * from @orig into @x, e.g. for state migration.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.  NOTE(review):
 * if the second kmemdup() fails, the already-allocated x->replay_esn is
 * left attached to @x rather than freed here — presumably the caller
 * tears down @x (and frees it) on error; confirm against the callers.
 */
static inline int xfrm_replay_clone(struct xfrm_state *x,
				     struct xfrm_state *orig)
{

	x->replay_esn = kmemdup(orig->replay_esn,
				xfrm_replay_state_esn_len(orig->replay_esn),
				GFP_KERNEL);
	if (!x->replay_esn)
		return -ENOMEM;
	x->preplay_esn = kmemdup(orig->preplay_esn,
				 xfrm_replay_state_esn_len(orig->preplay_esn),
				 GFP_KERNEL);
	if (!x->preplay_esn)
		return -ENOMEM;

	return 0;
}
2024
xfrm_algo_aead_clone(struct xfrm_algo_aead * orig)2025 static inline struct xfrm_algo_aead *xfrm_algo_aead_clone(struct xfrm_algo_aead *orig)
2026 {
2027 return kmemdup(orig, aead_len(orig), GFP_KERNEL);
2028 }
2029
2030
xfrm_algo_clone(struct xfrm_algo * orig)2031 static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
2032 {
2033 return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
2034 }
2035
xfrm_algo_auth_clone(struct xfrm_algo_auth * orig)2036 static inline struct xfrm_algo_auth *xfrm_algo_auth_clone(struct xfrm_algo_auth *orig)
2037 {
2038 return kmemdup(orig, xfrm_alg_auth_len(orig), GFP_KERNEL);
2039 }
2040
/* Drop one reference on each of the first @n states in @states. */
static inline void xfrm_states_put(struct xfrm_state **states, int n)
{
	int i;

	for (i = 0; i < n; i++)
		xfrm_state_put(states[i]);
}
2047
/* Delete each of the first @n states in @states. */
static inline void xfrm_states_delete(struct xfrm_state **states, int n)
{
	int i;

	for (i = 0; i < n; i++)
		xfrm_state_delete(states[i]);
}
2054 #endif
2055
2056 void __init xfrm_dev_init(void);
2057
2058 #ifdef CONFIG_XFRM_OFFLOAD
2059 void xfrm_dev_resume(struct sk_buff *skb);
2060 void xfrm_dev_backlog(struct softnet_data *sd);
2061 struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
2062 int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
2063 struct xfrm_user_offload *xuo,
2064 struct netlink_ext_ack *extack);
2065 int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
2066 struct xfrm_user_offload *xuo, u8 dir,
2067 struct netlink_ext_ack *extack);
2068 bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
2069 void xfrm_dev_state_delete(struct xfrm_state *x);
2070 void xfrm_dev_state_free(struct xfrm_state *x);
2071
xfrm_dev_state_advance_esn(struct xfrm_state * x)2072 static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
2073 {
2074 struct xfrm_dev_offload *xso = &x->xso;
2075 struct net_device *dev = READ_ONCE(xso->dev);
2076
2077 if (dev && dev->xfrmdev_ops->xdo_dev_state_advance_esn)
2078 dev->xfrmdev_ops->xdo_dev_state_advance_esn(x);
2079 }
2080
/* Decide whether packets for @dst may take the crypto-offload path.
 *
 * Requires a state with an offload-capable type and a bundle whose
 * child dst carries no further xfrm (single transform).  When an
 * offload_handle is present, additionally require that the offload
 * device is the device the packet actually leaves through
 * (xfrm_dst_path()), so hardware on the egress path does the work.
 */
static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
{
	struct xfrm_state *x = dst->xfrm;
	struct xfrm_dst *xdst;

	if (!x || !x->type_offload)
		return false;

	xdst = (struct xfrm_dst *) dst;
	if (!x->xso.offload_handle && !xdst->child->xfrm)
		return true;
	if (x->xso.offload_handle && (x->xso.dev == xfrm_dst_path(dst)->dev) &&
	    !xdst->child->xfrm)
		return true;

	return false;
}
2098
xfrm_dev_policy_delete(struct xfrm_policy * x)2099 static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
2100 {
2101 struct xfrm_dev_offload *xdo = &x->xdo;
2102 struct net_device *dev = xdo->dev;
2103
2104 if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_policy_delete)
2105 dev->xfrmdev_ops->xdo_dev_policy_delete(x);
2106 }
2107
xfrm_dev_policy_free(struct xfrm_policy * x)2108 static inline void xfrm_dev_policy_free(struct xfrm_policy *x)
2109 {
2110 struct xfrm_dev_offload *xdo = &x->xdo;
2111 struct net_device *dev = xdo->dev;
2112
2113 if (dev && dev->xfrmdev_ops) {
2114 if (dev->xfrmdev_ops->xdo_dev_policy_free)
2115 dev->xfrmdev_ops->xdo_dev_policy_free(x);
2116 xdo->dev = NULL;
2117 netdev_put(dev, &xdo->dev_tracker);
2118 }
2119 }
2120 #else
/* CONFIG_XFRM_OFFLOAD=n stub: no-op. */
static inline void xfrm_dev_resume(struct sk_buff *skb)
{
}
2124
/* CONFIG_XFRM_OFFLOAD=n stub: no-op. */
static inline void xfrm_dev_backlog(struct softnet_data *sd)
{
}
2128
/* CONFIG_XFRM_OFFLOAD=n stub: pass the skb through unchanged. */
static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
	return skb;
}
2133
/* CONFIG_XFRM_OFFLOAD=n stub: report success without offloading. */
static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo, struct netlink_ext_ack *extack)
{
	return 0;
}
2138
/* CONFIG_XFRM_OFFLOAD=n stub: no-op. */
static inline void xfrm_dev_state_delete(struct xfrm_state *x)
{
}
2142
/* CONFIG_XFRM_OFFLOAD=n stub: no-op. */
static inline void xfrm_dev_state_free(struct xfrm_state *x)
{
}
2146
/* CONFIG_XFRM_OFFLOAD=n stub: report success without offloading. */
static inline int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
				      struct xfrm_user_offload *xuo, u8 dir,
				      struct netlink_ext_ack *extack)
{
	return 0;
}
2153
/* CONFIG_XFRM_OFFLOAD=n stub: no-op. */
static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
{
}
2157
/* CONFIG_XFRM_OFFLOAD=n stub: no-op. */
static inline void xfrm_dev_policy_free(struct xfrm_policy *x)
{
}
2161
/* CONFIG_XFRM_OFFLOAD=n stub: offload is never possible. */
static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	return false;
}
2166
/* CONFIG_XFRM_OFFLOAD=n stub: no-op. */
static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
{
}
2170
/* CONFIG_XFRM_OFFLOAD=n stub: offload is never possible. */
static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
{
	return false;
}
2175 #endif
2176
xfrm_mark_get(struct nlattr ** attrs,struct xfrm_mark * m)2177 static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
2178 {
2179 if (attrs[XFRMA_MARK])
2180 memcpy(m, nla_data(attrs[XFRMA_MARK]), sizeof(struct xfrm_mark));
2181 else
2182 m->v = m->m = 0;
2183
2184 return m->v & m->m;
2185 }
2186
xfrm_mark_put(struct sk_buff * skb,const struct xfrm_mark * m)2187 static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
2188 {
2189 int ret = 0;
2190
2191 if (m->m | m->v)
2192 ret = nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
2193 return ret;
2194 }
2195
/* Merge the state's output mark with @mark: bits covered by the
 * state's mask come from the state, the remaining bits from @mark.
 */
static inline __u32 xfrm_smark_get(__u32 mark, struct xfrm_state *x)
{
	const struct xfrm_mark *smark = &x->props.smark;

	return (smark->v & smark->m) | (mark & ~smark->m);
}
2202
/* Emit an XFRMA_IF_ID attribute; zero means "no interface" and emits
 * nothing.  Returns 0 on success (or nothing to emit).
 */
static inline int xfrm_if_id_put(struct sk_buff *skb, __u32 if_id)
{
	if (!if_id)
		return 0;

	return nla_put_u32(skb, XFRMA_IF_ID, if_id);
}
2211
xfrm_tunnel_check(struct sk_buff * skb,struct xfrm_state * x,unsigned int family)2212 static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
2213 unsigned int family)
2214 {
2215 bool tunnel = false;
2216
2217 switch(family) {
2218 case AF_INET:
2219 if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
2220 tunnel = true;
2221 break;
2222 case AF_INET6:
2223 if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
2224 tunnel = true;
2225 break;
2226 }
2227 if (tunnel && !(x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL))
2228 return -EINVAL;
2229
2230 return 0;
2231 }
2232
2233 extern const int xfrm_msg_min[XFRM_NR_MSGTYPES];
2234 extern const struct nla_policy xfrma_policy[XFRMA_MAX+1];
2235
/* Hooks provided by the compat translation module (xfrm_compat) for
 * converting between 32-bit and 64-bit xfrm netlink message layouts.
 */
struct xfrm_translator {
	/* Allocate frag_list and put compat translation there */
	int (*alloc_compat)(struct sk_buff *skb, const struct nlmsghdr *src);

	/* Allocate nlmsg with 64-bit translation of received 32-bit message */
	struct nlmsghdr *(*rcv_msg_compat)(const struct nlmsghdr *nlh,
			int maxtype, const struct nla_policy *policy,
			struct netlink_ext_ack *extack);

	/* Translate 32-bit user_policy from sockptr */
	int (*xlate_user_policy_sockptr)(u8 **pdata32, int optlen);

	/* Module providing these hooks; pinned while a translator is in use. */
	struct module *owner;
};
2250
2251 #if IS_ENABLED(CONFIG_XFRM_USER_COMPAT)
2252 extern int xfrm_register_translator(struct xfrm_translator *xtr);
2253 extern int xfrm_unregister_translator(struct xfrm_translator *xtr);
2254 extern struct xfrm_translator *xfrm_get_translator(void);
2255 extern void xfrm_put_translator(struct xfrm_translator *xtr);
2256 #else
/* CONFIG_XFRM_USER_COMPAT=n stub: no translator is ever available. */
static inline struct xfrm_translator *xfrm_get_translator(void)
{
	return NULL;
}
/* CONFIG_XFRM_USER_COMPAT=n stub: no-op. */
static inline void xfrm_put_translator(struct xfrm_translator *xtr)
{
}
2264 #endif
2265
2266 #if IS_ENABLED(CONFIG_IPV6)
xfrm6_local_dontfrag(const struct sock * sk)2267 static inline bool xfrm6_local_dontfrag(const struct sock *sk)
2268 {
2269 int proto;
2270
2271 if (!sk || sk->sk_family != AF_INET6)
2272 return false;
2273
2274 proto = sk->sk_protocol;
2275 if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
2276 return inet6_test_bit(DONTFRAG, sk);
2277
2278 return false;
2279 }
2280 #endif
2281
2282 #if (IS_BUILTIN(CONFIG_XFRM_INTERFACE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \
2283 (IS_MODULE(CONFIG_XFRM_INTERFACE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
2284
2285 extern struct metadata_dst __percpu *xfrm_bpf_md_dst;
2286
2287 int register_xfrm_interface_bpf(void);
2288
2289 #else
2290
/* Stub when xfrm interface BPF kfuncs are not built: nothing to register. */
static inline int register_xfrm_interface_bpf(void)
{
	return 0;
}
2295
2296 #endif
2297
2298 #if IS_ENABLED(CONFIG_DEBUG_INFO_BTF)
2299 int register_xfrm_state_bpf(void);
2300 #else
/* Stub when BTF is not enabled: nothing to register. */
static inline int register_xfrm_state_bpf(void)
{
	return 0;
}
2305 #endif
2306
2307 int xfrm_nat_keepalive_init(unsigned short family);
2308 void xfrm_nat_keepalive_fini(unsigned short family);
2309 int xfrm_nat_keepalive_net_init(struct net *net);
2310 int xfrm_nat_keepalive_net_fini(struct net *net);
2311 void xfrm_nat_keepalive_state_updated(struct xfrm_state *x);
2312
2313 #endif /* _NET_XFRM_H */
2314