1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _NET_XFRM_H
3 #define _NET_XFRM_H
4
5 #include <linux/compiler.h>
6 #include <linux/xfrm.h>
7 #include <linux/spinlock.h>
8 #include <linux/list.h>
9 #include <linux/skbuff.h>
10 #include <linux/socket.h>
11 #include <linux/pfkeyv2.h>
12 #include <linux/ipsec.h>
13 #include <linux/in6.h>
14 #include <linux/mutex.h>
15 #include <linux/audit.h>
16 #include <linux/slab.h>
17 #include <linux/refcount.h>
18 #include <linux/sockptr.h>
19
20 #include <net/sock.h>
21 #include <net/dst.h>
22 #include <net/inet_dscp.h>
23 #include <net/ip.h>
24 #include <net/route.h>
25 #include <net/ipv6.h>
26 #include <net/ip6_fib.h>
27 #include <net/flow.h>
28 #include <net/gro_cells.h>
29
30 #include <linux/interrupt.h>
31
32 #ifdef CONFIG_XFRM_STATISTICS
33 #include <net/snmp.h>
34 #endif
35
36 #define XFRM_PROTO_ESP 50
37 #define XFRM_PROTO_AH 51
38 #define XFRM_PROTO_COMP 108
39 #define XFRM_PROTO_IPIP 4
40 #define XFRM_PROTO_IPV6 41
41 #define XFRM_PROTO_IPTFS IPPROTO_AGGFRAG
42 #define XFRM_PROTO_ROUTING IPPROTO_ROUTING
43 #define XFRM_PROTO_DSTOPTS IPPROTO_DSTOPTS
44
45 #define XFRM_ALIGN4(len) (((len) + 3) & ~3)
46 #define XFRM_ALIGN8(len) (((len) + 7) & ~7)
47 #define MODULE_ALIAS_XFRM_MODE(family, encap) \
48 MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
49 #define MODULE_ALIAS_XFRM_TYPE(family, proto) \
50 MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto))
51 #define MODULE_ALIAS_XFRM_OFFLOAD_TYPE(family, proto) \
52 MODULE_ALIAS("xfrm-offload-" __stringify(family) "-" __stringify(proto))
53
54 #ifdef CONFIG_XFRM_STATISTICS
55 #define XFRM_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
56 #define XFRM_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.xfrm_statistics, field, val)
57 #else
58 #define XFRM_INC_STATS(net, field) ((void)(net))
59 #define XFRM_ADD_STATS(net, field, val) ((void)(net))
60 #endif
61
62
63 /* Organization of SPD aka "XFRM rules"
64 ------------------------------------
65
66 Basic objects:
67 - policy rule, struct xfrm_policy (=SPD entry)
68 - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
69 - instance of a transformer, struct xfrm_state (=SA)
70 - template to clone xfrm_state, struct xfrm_tmpl
71
72 SPD is organized as hash table (for policies that meet minimum address prefix
73 length setting, net->xfrm.policy_hthresh). Other policies are stored in
74 lists, sorted into rbtree ordered by destination and source address networks.
75 See net/xfrm/xfrm_policy.c for details.
76
77 (To be compatible with existing pfkeyv2 implementations,
78 many rules with priority of 0x7fffffff are allowed to exist and
79 such rules are ordered in an unpredictable way, thanks to bsd folks.)
80
81 If "action" is "block", then we prohibit the flow, otherwise:
82 if "xfrms_nr" is zero, the flow passes untransformed. Otherwise,
83 policy entry has list of up to XFRM_MAX_DEPTH transformations,
84 described by templates xfrm_tmpl. Each template is resolved
85 to a complete xfrm_state (see below) and we pack bundle of transformations
86 to a dst_entry returned to requester.
87
88 dst -. xfrm .-> xfrm_state #1
89 |---. child .-> dst -. xfrm .-> xfrm_state #2
90 |---. child .-> dst -. xfrm .-> xfrm_state #3
91 |---. child .-> NULL
92
93
   Resolution of xfrm_tmpl
95 -----------------------
96 Template contains:
97 1. ->mode Mode: transport or tunnel
98 2. ->id.proto Protocol: AH/ESP/IPCOMP
99 3. ->id.daddr Remote tunnel endpoint, ignored for transport mode.
100 Q: allow to resolve security gateway?
101 4. ->id.spi If not zero, static SPI.
102 5. ->saddr Local tunnel endpoint, ignored for transport mode.
103 6. ->algos List of allowed algos. Plain bitmask now.
104 Q: ealgos, aalgos, calgos. What a mess...
105 7. ->share Sharing mode.
106 Q: how to implement private sharing mode? To add struct sock* to
107 flow id?
108
109 Having this template we search through SAD searching for entries
110 with appropriate mode/proto/algo, permitted by selector.
111 If no appropriate entry found, it is requested from key manager.
112
113 PROBLEMS:
114 Q: How to find all the bundles referring to a physical path for
115 PMTU discovery? Seems, dst should contain list of all parents...
116 and enter to infinite locking hierarchy disaster.
117 No! It is easier, we will not search for them, let them find us.
118 We add genid to each dst plus pointer to genid of raw IP route,
119 pmtu disc will update pmtu on raw IP route and increase its genid.
120 dst_check() will see this for top level and trigger resyncing
121 metrics. Plus, it will be made via sk->sk_dst_cache. Solved.
122 */
123
/* Iteration/walk bookkeeping embedded in each xfrm_state; 'all' links the
 * state onto the list used when dumping/walking all SAs.
 */
struct xfrm_state_walk {
	struct list_head	all;	/* linkage for whole-SAD walks */
	u8			state;
	u8			dying;
	u8			proto;	/* protocol filter for the walk */
	u32			seq;
	struct xfrm_address_filter *filter; /* optional address filter, may be NULL */
};
132
/* Direction of a hardware-offloaded SA (xfrm_dev_offload.dir). */
enum {
	XFRM_DEV_OFFLOAD_IN = 1,
	XFRM_DEV_OFFLOAD_OUT,
	XFRM_DEV_OFFLOAD_FWD,
};

/* Kind of hardware offload (xfrm_dev_offload.type). */
enum {
	XFRM_DEV_OFFLOAD_UNSPECIFIED,
	XFRM_DEV_OFFLOAD_CRYPTO,
	XFRM_DEV_OFFLOAD_PACKET,
};

/* Flags for xfrm_dev_offload.flags. */
enum {
	XFRM_DEV_OFFLOAD_FLAG_ACQ = 1,
};
148
/* Per-state hardware offload context (xfrm_state.xso). */
struct xfrm_dev_offload {
	/* The device for this offload.
	 * Device drivers should not use this directly, as that will prevent
	 * them from working with bonding device. Instead, the device passed
	 * to the add/delete callbacks should be used.
	 */
	struct net_device	*dev;
	netdevice_tracker	dev_tracker;
	/* This is a private pointer used by the bonding driver (and eventually
	 * should be moved there). Device drivers should not use it.
	 * Protected by xfrm_state.lock AND bond.ipsec_lock in most cases,
	 * except in the .xdo_dev_state_del() flow, where only xfrm_state.lock
	 * is held.
	 */
	struct net_device	*real_dev;
	unsigned long		offload_handle;
	u8			dir : 2;	/* XFRM_DEV_OFFLOAD_{IN,OUT,FWD} */
	u8			type : 2;	/* XFRM_DEV_OFFLOAD_{UNSPECIFIED,CRYPTO,PACKET} */
	u8			flags : 2;	/* XFRM_DEV_OFFLOAD_FLAG_* */
};
169
/* Encapsulation mode descriptor for an SA, keyed by address family. */
struct xfrm_mode {
	u8 encap;	/* encapsulation mode identifier */
	u8 family;	/* address family this mode applies to */
	u8 flags;	/* XFRM_MODE_FLAG_* (see below) */
};

/* Flags for xfrm_mode. */
enum {
	XFRM_MODE_FLAG_TUNNEL = 1,
};
180
/* Which replay-protection state representation an SA uses
 * (see xfrm_state.replay / replay_esn).
 */
enum xfrm_replay_mode {
	XFRM_REPLAY_MODE_LEGACY,
	XFRM_REPLAY_MODE_BMP,
	XFRM_REPLAY_MODE_ESN,	/* extended sequence numbers */
};
186
/* Full description of state of transformer (an IPsec SA). */
struct xfrm_state {
	possible_net_t		xs_net;	/* owning network namespace, see xs_net() */
	union {
		struct hlist_node	gclist;
		struct hlist_node	bydst;	/* hash chain by destination */
	};
	union {
		struct hlist_node	dev_gclist;
		struct hlist_node	bysrc;	/* hash chain by source */
	};
	struct hlist_node	byspi;	/* hash chain by SPI */
	struct hlist_node	byseq;
	struct hlist_node	state_cache;
	struct hlist_node	state_cache_input;

	refcount_t		refcnt;	/* see xfrm_state_hold()/xfrm_state_put() */
	spinlock_t		lock;	/* per-state lock; also referenced by the
					 * xfrm_dev_offload locking comment above */

	u32			pcpu_num;
	struct xfrm_id		id;
	struct xfrm_selector	sel;
	struct xfrm_mark	mark;
	u32			if_id;	/* virtual xfrm interface id */
	u32			tfcpad;

	u32			genid;

	/* Key manager bits */
	struct xfrm_state_walk	km;

	/* Parameters of this state. */
	struct {
		u32		reqid;
		u8		mode;
		u8		replay_window;
		u8		aalgo, ealgo, calgo;	/* auth/enc/compress algo ids */
		u8		flags;
		u16		family;
		xfrm_address_t	saddr;
		int		header_len;
		int		enc_hdr_len;
		int		trailer_len;
		u32		extra_flags;
		struct xfrm_mark	smark;
	} props;

	struct xfrm_lifetime_cfg lft;	/* configured lifetime limits */

	/* Data for transformer */
	struct xfrm_algo_auth	*aalg;	/* authentication algorithm + key */
	struct xfrm_algo	*ealg;	/* encryption algorithm + key */
	struct xfrm_algo	*calg;	/* compression algorithm */
	struct xfrm_algo_aead	*aead;	/* combined-mode (AEAD) algorithm */
	const char		*geniv;

	/* mapping change rate limiting */
	__be16 new_mapping_sport;
	u32 new_mapping;	/* seconds */
	u32 mapping_maxage;	/* seconds for input SA */

	/* Data for encapsulator */
	struct xfrm_encap_tmpl	*encap;

	/* NAT keepalive */
	u32			nat_keepalive_interval; /* seconds */
	time64_t		nat_keepalive_expiration;

	/* Data for care-of address */
	xfrm_address_t	*coaddr;

	/* IPComp needs an IPIP tunnel for handling uncompressed packets */
	struct xfrm_state	*tunnel;

	/* If a tunnel, number of users + 1 */
	atomic_t		tunnel_users;

	/* State for replay detection */
	struct xfrm_replay_state replay;
	struct xfrm_replay_state_esn *replay_esn;

	/* Replay detection state at the time we sent the last notification */
	struct xfrm_replay_state preplay;
	struct xfrm_replay_state_esn *preplay_esn;

	/* replay detection mode */
	enum xfrm_replay_mode    repl_mode;
	/* internal flag that only holds state for delayed aevent at the
	 * moment
	 */
	u32			xflags;	/* XFRM_TIME_DEFER / XFRM_SOFT_EXPIRE */

	/* Replay detection notification settings */
	u32			replay_maxage;
	u32			replay_maxdiff;

	/* Replay detection notification timer */
	struct timer_list	rtimer;

	/* Statistics */
	struct xfrm_stats	stats;

	struct xfrm_lifetime_cur curlft;	/* current lifetime usage counters */
	struct hrtimer		mtimer;

	struct xfrm_dev_offload xso;	/* hardware offload context */

	/* used to fix curlft->add_time when changing date */
	long		saved_tmo;

	/* Last used time */
	time64_t		lastused;

	struct page_frag xfrag;

	/* Reference to data common to all the instances of this
	 * transformer. */
	const struct xfrm_type	*type;
	struct xfrm_mode	inner_mode;
	struct xfrm_mode	inner_mode_iaf;	/* inter-address-family inner mode,
						 * see xfrm_ip2inner_mode() */
	struct xfrm_mode	outer_mode;

	const struct xfrm_type_offload	*type_offload;

	/* Security context */
	struct xfrm_sec_ctx	*security;

	/* Private data of this transformer, format is opaque,
	 * interpreted by xfrm_type methods. */
	void			*data;
	u8			dir;

	const struct xfrm_mode_cbs	*mode_cbs;	/* optional mode callbacks */
	void			*mode_data;	/* private data for mode_cbs */
};
322
/* Return the network namespace an xfrm_state belongs to. */
static inline struct net *xs_net(struct xfrm_state *x)
{
	return read_pnet(&x->xs_net);
}
327
328 /* xflags - make enum if more show up */
329 #define XFRM_TIME_DEFER 1
330 #define XFRM_SOFT_EXPIRE 2
331
/* Lifecycle states of an xfrm_state. */
enum {
	XFRM_STATE_VOID,
	XFRM_STATE_ACQ,		/* larval state created by an acquire */
	XFRM_STATE_VALID,	/* fully installed and usable */
	XFRM_STATE_ERROR,
	XFRM_STATE_EXPIRED,
	XFRM_STATE_DEAD
};
340
/* callback structure passed from either netlink or pfkey */
struct km_event {
	union {
		u32 hard;	/* hard (vs soft) expiry indicator */
		u32 proto;
		u32 byid;
		u32 aevent;
		u32 type;
	} data;			/* event-specific payload */

	u32	seq;
	u32	portid;		/* requesting netlink port id -- TODO confirm */
	u32	event;		/* which event occurred */
	struct net *net;	/* namespace the event originated in */
};
356
/* Output of an xfrm-interface decode_session() lookup. */
struct xfrm_if_decode_session_result {
	struct net *net;	/* namespace of the matched interface */
	u32 if_id;		/* virtual xfrm interface id */
};

/* Callbacks registered by the xfrm interface driver,
 * see xfrm_if_register_cb()/xfrm_if_unregister_cb() below.
 */
struct xfrm_if_cb {
	bool (*decode_session)(struct sk_buff *skb,
			       unsigned short family,
			       struct xfrm_if_decode_session_result *res);
};
367
368 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
369 void xfrm_if_unregister_cb(void);
370
/* Arguments for an address-family specific route lookup
 * (xfrm_policy_afinfo.dst_lookup / get_saddr).
 */
struct xfrm_dst_lookup_params {
	struct net *net;
	dscp_t dscp;
	int oif;			/* output interface index */
	xfrm_address_t *saddr;		/* source address */
	xfrm_address_t *daddr;		/* destination address */
	u32 mark;			/* routing mark */
	__u8 ipproto;
	union flowi_uli uli;		/* upper-layer info (ports etc.) */
};
381
struct net_device;
struct xfrm_type;
struct xfrm_dst;
/* Per-address-family hooks used by the policy/bundle layer. */
struct xfrm_policy_afinfo {
	struct dst_ops		*dst_ops;
	struct dst_entry	*(*dst_lookup)(const struct xfrm_dst_lookup_params *params);
	int			(*get_saddr)(xfrm_address_t *saddr,
					     const struct xfrm_dst_lookup_params *params);
	int			(*fill_dst)(struct xfrm_dst *xdst,
					    struct net_device *dev,
					    const struct flowi *fl);
	struct dst_entry	*(*blackhole_route)(struct net *net, struct dst_entry *orig);
};
395
396 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family);
397 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo);
398 void km_policy_notify(struct xfrm_policy *xp, int dir,
399 const struct km_event *c);
400 void km_state_notify(struct xfrm_state *x, const struct km_event *c);
401
402 struct xfrm_tmpl;
403 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t,
404 struct xfrm_policy *pol);
405 void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
406 int __xfrm_state_delete(struct xfrm_state *x);
407
/* Per-address-family state handling: registered xfrm_type implementations
 * plus output/input-finish hooks for that family.
 */
struct xfrm_state_afinfo {
	u8				family;
	u8				proto;

	const struct xfrm_type_offload *type_offload_esp;

	const struct xfrm_type		*type_esp;
	const struct xfrm_type		*type_ipip;
	const struct xfrm_type		*type_ipip6;
	const struct xfrm_type		*type_comp;
	const struct xfrm_type		*type_ah;
	const struct xfrm_type		*type_routing;
	const struct xfrm_type		*type_dstopts;

	int			(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
	int			(*transport_finish)(struct sk_buff *skb,
						    int async);
	void			(*local_error)(struct sk_buff *skb, u32 mtu);
};
427
428 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
429 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
430 struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
431 struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family);
432
/* Per-family input callback, registered via xfrm_input_register_afinfo(). */
struct xfrm_input_afinfo {
	u8			family;
	bool			is_ipip;
	int			(*callback)(struct sk_buff *skb, u8 protocol,
					    int err);
};
439
440 int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo);
441 int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo);
442
443 void xfrm_flush_gc(void);
444
/* One transform protocol implementation (e.g. AH/ESP/IPcomp),
 * registered per family via xfrm_register_type().
 */
struct xfrm_type {
	struct module		*owner;
	u8			proto;	/* IP protocol number implemented */
	u8			flags;	/* XFRM_TYPE_* bits below */
#define XFRM_TYPE_NON_FRAGMENT	1
#define XFRM_TYPE_REPLAY_PROT	2
#define XFRM_TYPE_LOCAL_COADDR	4
#define XFRM_TYPE_REMOTE_COADDR	8

	int			(*init_state)(struct xfrm_state *x,
					      struct netlink_ext_ack *extack);
	void			(*destructor)(struct xfrm_state *);
	int			(*input)(struct xfrm_state *, struct sk_buff *skb);
	int			(*output)(struct xfrm_state *, struct sk_buff *pskb);
	int			(*reject)(struct xfrm_state *, struct sk_buff *,
					  const struct flowi *);
};
462
463 int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
464 void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
465
/* Hardware-offload variant of a transform protocol,
 * registered via xfrm_register_type_offload().
 */
struct xfrm_type_offload {
	struct module	*owner;
	u8		proto;	/* IP protocol number implemented */
	void		(*encap)(struct xfrm_state *, struct sk_buff *pskb);
	int		(*input_tail)(struct xfrm_state *x, struct sk_buff *skb);
	int		(*xmit)(struct xfrm_state *, struct sk_buff *pskb, netdev_features_t features);
};
473
474 int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family);
475 void xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family);
476 void xfrm_set_type_offload(struct xfrm_state *x, bool try_load);
xfrm_unset_type_offload(struct xfrm_state * x)477 static inline void xfrm_unset_type_offload(struct xfrm_state *x)
478 {
479 if (!x->type_offload)
480 return;
481
482 module_put(x->type_offload->owner);
483 x->type_offload = NULL;
484 }
485
/**
 * struct xfrm_mode_cbs - XFRM mode callbacks
 * @owner: module owner or NULL
 * @init_state: Add/init mode specific state in `xfrm_state *x`
 * @clone_state: Copy mode specific values from `orig` to new state `x`
 * @destroy_state: Cleanup mode specific state from `xfrm_state *x`
 * @user_init: Process mode specific netlink attributes from user
 * @copy_to_user: Add netlink attributes to `attrs` based on state in `x`
 * @sa_len: Return space required to store mode specific netlink attributes
 * @get_inner_mtu: Return avail payload space after removing encap overhead
 * @input: Process received packet from SA using mode
 * @output: Output given packet using mode
 * @prepare_output: Add mode specific encapsulation to packet in skb. On return
 *	`transport_header` should point at ESP header, `network_header` should
 *	point at outer IP header and `mac_header` should point at the
 *	protocol/nexthdr field of the outer IP.
 *
 * One should examine and understand the specific uses of these callbacks in
 * xfrm for further detail on how and when these functions are called. RTSL.
 */
struct xfrm_mode_cbs {
	struct module	*owner;
	int	(*init_state)(struct xfrm_state *x);
	int	(*clone_state)(struct xfrm_state *x, struct xfrm_state *orig);
	void	(*destroy_state)(struct xfrm_state *x);
	int	(*user_init)(struct net *net, struct xfrm_state *x,
			     struct nlattr **attrs,
			     struct netlink_ext_ack *extack);
	int	(*copy_to_user)(struct xfrm_state *x, struct sk_buff *skb);
	unsigned int	(*sa_len)(const struct xfrm_state *x);
	u32	(*get_inner_mtu)(struct xfrm_state *x, int outer_mtu);
	int	(*input)(struct xfrm_state *x, struct sk_buff *skb);
	int	(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
	int	(*prepare_output)(struct xfrm_state *x, struct sk_buff *skb);
};
521
522 int xfrm_register_mode_cbs(u8 mode, const struct xfrm_mode_cbs *mode_cbs);
523 void xfrm_unregister_mode_cbs(u8 mode);
524
/* Map an address family to the IP protocol number used for IP-in-IP
 * encapsulation of that family; 0 for unsupported families.
 */
static inline int xfrm_af2proto(unsigned int family)
{
	if (family == AF_INET)
		return IPPROTO_IPIP;
	if (family == AF_INET6)
		return IPPROTO_IPV6;
	return 0;
}
536
xfrm_ip2inner_mode(struct xfrm_state * x,int ipproto)537 static inline const struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto)
538 {
539 if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
540 (ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6))
541 return &x->inner_mode;
542 else
543 return &x->inner_mode_iaf;
544 }
545
/* Template describing one transformation a policy requires; resolved to a
 * concrete xfrm_state during bundle construction.
 */
struct xfrm_tmpl {
/* id in template is interpreted as:
 * daddr - destination of tunnel, may be zero for transport mode.
 * spi   - zero to acquire spi. Not zero if spi is static, then
 *	   daddr must be fixed too.
 * proto - AH/ESP/IPCOMP
 */
	struct xfrm_id		id;

/* Source address of tunnel. Ignored, if it is not a tunnel. */
	xfrm_address_t		saddr;

	unsigned short		encap_family;

	u32			reqid;

/* Mode: transport, tunnel etc. */
	u8			mode;

/* Sharing mode: unique, this session only, this user only etc. */
	u8			share;

/* May skip this transformation if no SA is found */
	u8			optional;

/* Skip aalgos/ealgos/calgos checks. */
	u8			allalgs;

/* Bit mask of algos allowed for acquisition */
	u32			aalgos;
	u32			ealgos;
	u32			calgos;
};
579
580 #define XFRM_MAX_DEPTH 6
581 #define XFRM_MAX_OFFLOAD_DEPTH 1
582
/* Walk linkage embedded in each policy (see xfrm_policy.walk). */
struct xfrm_policy_walk_entry {
	struct list_head	all;
	u8			dead;	/* policy removed; skip during walk */
};

/* Iterator state for dumping policies of a given type. */
struct xfrm_policy_walk {
	struct xfrm_policy_walk_entry walk;
	u8 type;	/* XFRM_POLICY_TYPE_* filter */
	u32 seq;
};

/* Queue of packets held while SA acquisition is in progress. */
struct xfrm_policy_queue {
	struct sk_buff_head	hold_queue;
	struct timer_list	hold_timer;
	unsigned long		timeout;
};
599
/**
 * struct xfrm_policy - xfrm policy
 * @xp_net: network namespace the policy lives in
 * @bydst: hlist node for SPD hash table or rbtree list
 * @byidx: hlist node for index hash table
 * @state_cache_list: hlist head for policy cached xfrm states
 * @lock: serialize changes to policy structure members
 * @refcnt: reference count, freed once it reaches 0
 * @pos: kernel internal tie-breaker to determine age of policy
 * @timer: timer
 * @genid: generation, used to invalidate old policies
 * @priority: priority, set by userspace
 * @index: policy index (autogenerated)
 * @if_id: virtual xfrm interface id
 * @mark: packet mark
 * @selector: selector
 * @lft: lifetime configuration data
 * @curlft: lifetime state
 * @walk: list head on pernet policy list
 * @polq: queue to hold packets while acquire operation in progress
 * @bydst_reinsert: policy tree node needs to be merged
 * @type: XFRM_POLICY_TYPE_MAIN or _SUB
 * @action: XFRM_POLICY_ALLOW or _BLOCK
 * @flags: XFRM_POLICY_LOCALOK, XFRM_POLICY_ICMP
 * @xfrm_nr: number of used templates in @xfrm_vec
 * @family: protocol family
 * @security: SELinux security label
 * @xfrm_vec: array of templates to resolve state
 * @rcu: rcu head, used to defer memory release
 * @xdo: hardware offload state
 */
struct xfrm_policy {
	possible_net_t		xp_net;
	struct hlist_node	bydst;
	struct hlist_node	byidx;

	struct hlist_head	state_cache_list;

	/* This lock only affects elements except for entry. */
	rwlock_t		lock;
	refcount_t		refcnt;
	u32			pos;
	struct timer_list	timer;

	atomic_t		genid;
	u32			priority;
	u32			index;
	u32			if_id;
	struct xfrm_mark	mark;
	struct xfrm_selector	selector;
	struct xfrm_lifetime_cfg lft;
	struct xfrm_lifetime_cur curlft;
	struct xfrm_policy_walk_entry walk;
	struct xfrm_policy_queue polq;
	bool                    bydst_reinsert;
	u8			type;
	u8			action;
	u8			flags;
	u8			xfrm_nr;
	u16			family;
	struct xfrm_sec_ctx	*security;
	struct xfrm_tmpl       	xfrm_vec[XFRM_MAX_DEPTH];
	struct rcu_head		rcu;

	struct xfrm_dev_offload xdo;
};
666
/* Return the network namespace an xfrm_policy belongs to. */
static inline struct net *xp_net(const struct xfrm_policy *xp)
{
	return read_pnet(&xp->xp_net);
}
671
/* Key-manager address pair used with migration (see xfrm_mgr.migrate). */
struct xfrm_kmaddress {
	xfrm_address_t          local;
	xfrm_address_t          remote;
	u32			reserved;
	u16			family;
};

/* One endpoint-address migration: rewrite old_* to new_* on matching SAs. */
struct xfrm_migrate {
	xfrm_address_t		old_daddr;
	xfrm_address_t		old_saddr;
	xfrm_address_t		new_daddr;
	xfrm_address_t		new_saddr;
	u8			proto;
	u8			mode;
	u16			reserved;
	u32			reqid;
	u16			old_family;
	u16			new_family;
};
691
692 #define XFRM_KM_TIMEOUT 30
693 /* what happened */
694 #define XFRM_REPLAY_UPDATE XFRM_AE_CR
695 #define XFRM_REPLAY_TIMEOUT XFRM_AE_CE
696
697 /* default aevent timeout in units of 100ms */
698 #define XFRM_AE_ETIME 10
699 /* Async Event timer multiplier */
700 #define XFRM_AE_ETH_M 10
701 /* default seq threshold size */
702 #define XFRM_AE_SEQT_SIZE 2
703
/* A key manager (netlink or pfkey) registered via xfrm_register_km();
 * the kernel calls these hooks to notify/query userspace key managers.
 */
struct xfrm_mgr {
	struct list_head	list;	/* linkage on the list of registered managers */
	int			(*notify)(struct xfrm_state *x, const struct km_event *c);
	int			(*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp);
	struct xfrm_policy	*(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
	int			(*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
	int			(*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c);
	int			(*report)(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
	int			(*migrate)(const struct xfrm_selector *sel,
					   u8 dir, u8 type,
					   const struct xfrm_migrate *m,
					   int num_bundles,
					   const struct xfrm_kmaddress *k,
					   const struct xfrm_encap_tmpl *encap);
	bool			(*is_alive)(const struct km_event *c);
};
720
721 void xfrm_register_km(struct xfrm_mgr *km);
722 void xfrm_unregister_km(struct xfrm_mgr *km);
723
/* skb control-block layout shared by all xfrm cb structs below; keeps the
 * inet/inet6 parm area intact and records the tunnel the packet came from.
 */
struct xfrm_tunnel_skb_cb {
	union {
		struct inet_skb_parm h4;
		struct inet6_skb_parm h6;
	} header;

	union {
		struct ip_tunnel *ip4;
		struct ip6_tnl *ip6;
	} tunnel;
};
735
736 #define XFRM_TUNNEL_SKB_CB(__skb) ((struct xfrm_tunnel_skb_cb *)&((__skb)->cb[0]))
737
/*
 * This structure is used for the duration where packets are being
 * transformed by IPsec. As soon as the packet leaves IPsec the
 * area beyond the generic IP part may be overwritten.
 */
struct xfrm_skb_cb {
	struct xfrm_tunnel_skb_cb header;

	/* Sequence number for replay protection. */
	union {
		struct {
			__u32 low;	/* low 32 bits (host order) */
			__u32 hi;	/* high 32 bits for ESN */
		} output;
		struct {
			__be32 low;	/* low 32 bits (network order) */
			__be32 hi;	/* high 32 bits for ESN */
		} input;
	} seq;
};
758
759 #define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0]))
760
/*
 * This structure is used by the afinfo prepare_input/prepare_output functions
 * to transmit header information to the mode input/output functions.
 */
struct xfrm_mode_skb_cb {
	struct xfrm_tunnel_skb_cb header;

	/* Copied from header for IPv4, always set to zero and DF for IPv6. */
	__be16 id;
	__be16 frag_off;

	/* IP header length (excluding options or extension headers). */
	u8 ihl;

	/* TOS for IPv4, class for IPv6. */
	u8 tos;

	/* TTL for IPv4, hop limit for IPv6. */
	u8 ttl;

	/* Protocol for IPv4, NH for IPv6. */
	u8 protocol;

	/* Option length for IPv4, zero for IPv6. */
	u8 optlen;

	/* Used by IPv6 only, zero for IPv4. */
	u8 flow_lbl[3];
};
790
791 #define XFRM_MODE_SKB_CB(__skb) ((struct xfrm_mode_skb_cb *)&((__skb)->cb[0]))
792
/*
 * This structure is used by the input processing to locate the SPI and
 * related information.
 */
struct xfrm_spi_skb_cb {
	struct xfrm_tunnel_skb_cb header;

	unsigned int daddroff;	/* offset of daddr within the IP header */
	unsigned int family;
	__be32 seq;
};
804
805 #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
806
807 #ifdef CONFIG_AUDITSYSCALL
/* Open an IPsec audit record and log "op=<op>". Returns NULL when auditing
 * is disabled or the record could not be allocated (GFP_ATOMIC).
 */
static inline struct audit_buffer *xfrm_audit_start(const char *op)
{
	struct audit_buffer *audit_buf = NULL;

	if (audit_enabled == AUDIT_OFF)
		return NULL;
	audit_buf = audit_log_start(audit_context(), GFP_ATOMIC,
				    AUDIT_MAC_IPSEC_EVENT);
	if (audit_buf == NULL)
		return NULL;
	audit_log_format(audit_buf, "op=%s", op);
	return audit_buf;
}
821
/* Append " auid=<loginuid> ses=<sessionid>" of the current task (or the
 * invalid/unset values when !task_valid) plus the task security context
 * to an open audit record.
 */
static inline void xfrm_audit_helper_usrinfo(bool task_valid,
					     struct audit_buffer *audit_buf)
{
	const unsigned int auid = from_kuid(&init_user_ns, task_valid ?
					    audit_get_loginuid(current) :
					    INVALID_UID);
	const unsigned int ses = task_valid ? audit_get_sessionid(current) :
					      AUDIT_SID_UNSET;

	audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses);
	audit_log_task_context(audit_buf);
}
834
835 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid);
836 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
837 bool task_valid);
838 void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid);
839 void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid);
840 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
841 struct sk_buff *skb);
842 void xfrm_audit_state_replay(struct xfrm_state *x, struct sk_buff *skb,
843 __be32 net_seq);
844 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
845 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, __be32 net_spi,
846 __be32 net_seq);
847 void xfrm_audit_state_icvfail(struct xfrm_state *x, struct sk_buff *skb,
848 u8 proto);
849 #else
850
/* No-op stubs used when CONFIG_AUDITSYSCALL is disabled. */

static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
					 bool task_valid)
{
}

static inline void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
					    bool task_valid)
{
}

static inline void xfrm_audit_state_add(struct xfrm_state *x, int result,
					bool task_valid)
{
}

static inline void xfrm_audit_state_delete(struct xfrm_state *x, int result,
					   bool task_valid)
{
}

static inline void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
						    struct sk_buff *skb)
{
}

static inline void xfrm_audit_state_replay(struct xfrm_state *x,
					   struct sk_buff *skb, __be32 net_seq)
{
}

static inline void xfrm_audit_state_notfound_simple(struct sk_buff *skb,
						    u16 family)
{
}

static inline void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
					     __be32 net_spi, __be32 net_seq)
{
}

static inline void xfrm_audit_state_icvfail(struct xfrm_state *x,
					    struct sk_buff *skb, u8 proto)
{
}
895 #endif /* CONFIG_AUDITSYSCALL */
896
/* Take a reference on a policy; tolerates NULL. */
static inline void xfrm_pol_hold(struct xfrm_policy *policy)
{
	if (likely(policy != NULL))
		refcount_inc(&policy->refcnt);
}
902
903 void xfrm_policy_destroy(struct xfrm_policy *policy);
904
/* Drop a policy reference; destroys the policy when it was the last one.
 * Note: unlike xfrm_pol_hold(), this does NOT accept NULL.
 */
static inline void xfrm_pol_put(struct xfrm_policy *policy)
{
	if (refcount_dec_and_test(&policy->refcnt))
		xfrm_policy_destroy(policy);
}
910
/* Drop references on an array of policies, last entry first. */
static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
{
	while (npols > 0)
		xfrm_pol_put(pols[--npols]);
}
917
void __xfrm_state_destroy(struct xfrm_state *);

/* Drop a state reference WITHOUT destroying it at zero; callers must
 * guarantee another reference keeps the state alive.
 */
static inline void __xfrm_state_put(struct xfrm_state *x)
{
	refcount_dec(&x->refcnt);
}

/* Drop a state reference; destroys the state when it was the last one. */
static inline void xfrm_state_put(struct xfrm_state *x)
{
	if (refcount_dec_and_test(&x->refcnt))
		__xfrm_state_destroy(x);
}

/* Take a reference on a state (must already be non-zero). */
static inline void xfrm_state_hold(struct xfrm_state *x)
{
	refcount_inc(&x->refcnt);
}
935
addr_match(const void * token1,const void * token2,unsigned int prefixlen)936 static inline bool addr_match(const void *token1, const void *token2,
937 unsigned int prefixlen)
938 {
939 const __be32 *a1 = token1;
940 const __be32 *a2 = token2;
941 unsigned int pdw;
942 unsigned int pbi;
943
944 pdw = prefixlen >> 5; /* num of whole u32 in prefix */
945 pbi = prefixlen & 0x1f; /* num of bits in incomplete u32 in prefix */
946
947 if (pdw)
948 if (memcmp(a1, a2, pdw << 2))
949 return false;
950
951 if (pbi) {
952 __be32 mask;
953
954 mask = htonl((0xffffffff) << (32 - pbi));
955
956 if ((a1[pdw] ^ a2[pdw]) & mask)
957 return false;
958 }
959
960 return true;
961 }
962
addr4_match(__be32 a1,__be32 a2,u8 prefixlen)963 static inline bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen)
964 {
965 /* C99 6.5.7 (3): u32 << 32 is undefined behaviour */
966 if (sizeof(long) == 4 && prefixlen == 0)
967 return true;
968 return !((a1 ^ a2) & htonl(~0UL << (32 - prefixlen)));
969 }
970
971 static __inline__
xfrm_flowi_sport(const struct flowi * fl,const union flowi_uli * uli)972 __be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli)
973 {
974 __be16 port;
975 switch(fl->flowi_proto) {
976 case IPPROTO_TCP:
977 case IPPROTO_UDP:
978 case IPPROTO_UDPLITE:
979 case IPPROTO_SCTP:
980 port = uli->ports.sport;
981 break;
982 case IPPROTO_ICMP:
983 case IPPROTO_ICMPV6:
984 port = htons(uli->icmpt.type);
985 break;
986 case IPPROTO_MH:
987 port = htons(uli->mht.type);
988 break;
989 case IPPROTO_GRE:
990 port = htons(ntohl(uli->gre_key) >> 16);
991 break;
992 default:
993 port = 0; /*XXX*/
994 }
995 return port;
996 }
997
998 static __inline__
xfrm_flowi_dport(const struct flowi * fl,const union flowi_uli * uli)999 __be16 xfrm_flowi_dport(const struct flowi *fl, const union flowi_uli *uli)
1000 {
1001 __be16 port;
1002 switch(fl->flowi_proto) {
1003 case IPPROTO_TCP:
1004 case IPPROTO_UDP:
1005 case IPPROTO_UDPLITE:
1006 case IPPROTO_SCTP:
1007 port = uli->ports.dport;
1008 break;
1009 case IPPROTO_ICMP:
1010 case IPPROTO_ICMPV6:
1011 port = htons(uli->icmpt.code);
1012 break;
1013 case IPPROTO_GRE:
1014 port = htons(ntohl(uli->gre_key) & 0xffff);
1015 break;
1016 default:
1017 port = 0; /*XXX*/
1018 }
1019 return port;
1020 }
1021
1022 bool xfrm_selector_match(const struct xfrm_selector *sel,
1023 const struct flowi *fl, unsigned short family);
1024
#ifdef CONFIG_SECURITY_NETWORK_XFRM
/* If neither has a context --> match.
 * Otherwise, both must have a context and the sid, doi and alg must match.
 */
static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
{
	return ((!s1 && !s2) ||
		(s1 && s2 &&
		 (s1->ctx_sid == s2->ctx_sid) &&
		 (s1->ctx_doi == s2->ctx_doi) &&
		 (s1->ctx_alg == s2->ctx_alg)));
}
#else
/* Without security labelling support, any pair of contexts matches. */
static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
{
	return true;
}
#endif
1043
/* A struct encoding bundle of transformations to apply to some set of flow.
 *
 * xdst->child points to the next element of bundle.
 * dst->xfrm points to an instance of transformer.
 *
 * Due to unfortunate limitations of current routing cache, which we
 * have no time to fix, it mirrors struct rtable and bound to the same
 * routing key, including saddr,daddr. However, we can have many of
 * bundles differing by session id. All the bundles grow from a parent
 * policy rule.
 */
struct xfrm_dst {
	union {
		struct dst_entry dst;
		struct rtable rt;
		struct rt6_info rt6;
	} u;
	struct dst_entry *route;	/* route the bundle was built over */
	struct dst_entry *child;	/* next dst in the bundle chain */
	struct dst_entry *path;		/* final (non-xfrm) dst of the bundle */
	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];	/* policies resolved for this bundle */
	int num_pols, num_xfrms;	/* valid entries in pols[]; states in the bundle */
	u32 xfrm_genid;			/* generation ids to detect stale bundles */
	u32 policy_genid;
	u32 route_mtu_cached;		/* cached MTUs of route/child */
	u32 child_mtu_cached;
	u32 route_cookie;		/* validity cookies of route/path dsts */
	u32 path_cookie;
};
1073
xfrm_dst_path(const struct dst_entry * dst)1074 static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
1075 {
1076 #ifdef CONFIG_XFRM
1077 if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
1078 const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst;
1079
1080 return xdst->path;
1081 }
1082 #endif
1083 return (struct dst_entry *) dst;
1084 }
1085
xfrm_dst_child(const struct dst_entry * dst)1086 static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
1087 {
1088 #ifdef CONFIG_XFRM
1089 if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
1090 struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
1091 return xdst->child;
1092 }
1093 #endif
1094 return NULL;
1095 }
1096
1097 #ifdef CONFIG_XFRM
xfrm_dst_set_child(struct xfrm_dst * xdst,struct dst_entry * child)1098 static inline void xfrm_dst_set_child(struct xfrm_dst *xdst, struct dst_entry *child)
1099 {
1100 xdst->child = child;
1101 }
1102
xfrm_dst_destroy(struct xfrm_dst * xdst)1103 static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
1104 {
1105 xfrm_pols_put(xdst->pols, xdst->num_pols);
1106 dst_release(xdst->route);
1107 if (likely(xdst->u.dst.xfrm))
1108 xfrm_state_put(xdst->u.dst.xfrm);
1109 }
1110 #endif
1111
1112 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
1113
/* Configuration of an xfrm virtual interface (xfrmi). */
struct xfrm_if_parms {
	int link;		/* ifindex of underlying L2 interface */
	u32 if_id;		/* interface identifier */
	bool collect_md;	/* use metadata dst (collect_md mode) */
};

/* Runtime state of an xfrm virtual interface. */
struct xfrm_if {
	struct xfrm_if __rcu *next;	/* next interface in list */
	struct net_device *dev;		/* virtual device associated with interface */
	struct net *net;		/* netns for packet i/o */
	struct xfrm_if_parms p;		/* interface parms */

	struct gro_cells gro_cells;	/* per-cpu GRO receive queues */
};
1128
/* Per-packet hardware-offload bookkeeping, stored alongside each state
 * in the skb's sec_path (ovec[]).
 */
struct xfrm_offload {
	/* Output sequence number for replay protection on offloading. */
	struct {
		__u32 low;
		__u32 hi;
	} seq;

	/* Driver/stack signalling bits describing how far offload got. */
	__u32 flags;
#define	SA_DELETE_REQ		1
#define	CRYPTO_DONE		2
#define	CRYPTO_NEXT_DONE	4
#define	CRYPTO_FALLBACK		8
#define	XFRM_GSO_SEGMENT	16
#define	XFRM_GRO		32
/* 64 is free */
#define	XFRM_DEV_RESUME		128
#define	XFRM_XMIT		256

	/* Crypto outcome reported by the device. */
	__u32 status;
#define CRYPTO_SUCCESS				1
#define CRYPTO_GENERIC_ERROR			2
#define CRYPTO_TRANSPORT_AH_AUTH_FAILED		4
#define CRYPTO_TRANSPORT_ESP_AUTH_FAILED	8
#define CRYPTO_TUNNEL_AH_AUTH_FAILED		16
#define CRYPTO_TUNNEL_ESP_AUTH_FAILED		32
#define CRYPTO_INVALID_PACKET_SYNTAX		64
#define CRYPTO_INVALID_PROTOCOL			128

	/* Used to keep whole l2 header for transport mode GRO */
	__u32			orig_mac_len;

	__u8			proto;		/* IPsec protocol of this entry */
	__u8			inner_ipproto;	/* inner payload protocol (tunnel mode) */
};
1163
/* Security path: the xfrm states a packet has traversed on input,
 * attached to the skb as the SKB_EXT_SEC_PATH extension.
 */
struct sec_path {
	int			len;		/* valid entries in xvec[] */
	int			olen;		/* valid entries in ovec[] */
	int			verified_cnt;	/* NOTE(review): presumably entries already policy-verified — confirm */

	struct xfrm_state	*xvec[XFRM_MAX_DEPTH];
	struct xfrm_offload	ovec[XFRM_MAX_OFFLOAD_DEPTH];
};
1172
1173 struct sec_path *secpath_set(struct sk_buff *skb);
1174
/* Drop @skb's sec_path extension, if present (no-op without CONFIG_XFRM). */
static inline void
secpath_reset(struct sk_buff *skb)
{
#ifdef CONFIG_XFRM
	skb_ext_del(skb, SKB_EXT_SEC_PATH);
#endif
}
1182
1183 static inline int
xfrm_addr_any(const xfrm_address_t * addr,unsigned short family)1184 xfrm_addr_any(const xfrm_address_t *addr, unsigned short family)
1185 {
1186 switch (family) {
1187 case AF_INET:
1188 return addr->a4 == 0;
1189 case AF_INET6:
1190 return ipv6_addr_any(&addr->in6);
1191 }
1192 return 0;
1193 }
1194
1195 static inline int
__xfrm4_state_addr_cmp(const struct xfrm_tmpl * tmpl,const struct xfrm_state * x)1196 __xfrm4_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
1197 {
1198 return (tmpl->saddr.a4 &&
1199 tmpl->saddr.a4 != x->props.saddr.a4);
1200 }
1201
1202 static inline int
__xfrm6_state_addr_cmp(const struct xfrm_tmpl * tmpl,const struct xfrm_state * x)1203 __xfrm6_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
1204 {
1205 return (!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
1206 !ipv6_addr_equal((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
1207 }
1208
1209 static inline int
xfrm_state_addr_cmp(const struct xfrm_tmpl * tmpl,const struct xfrm_state * x,unsigned short family)1210 xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, unsigned short family)
1211 {
1212 switch (family) {
1213 case AF_INET:
1214 return __xfrm4_state_addr_cmp(tmpl, x);
1215 case AF_INET6:
1216 return __xfrm6_state_addr_cmp(tmpl, x);
1217 }
1218 return !0;
1219 }
1220
1221 #ifdef CONFIG_XFRM
xfrm_input_state(struct sk_buff * skb)1222 static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
1223 {
1224 struct sec_path *sp = skb_sec_path(skb);
1225
1226 return sp->xvec[sp->len - 1];
1227 }
1228 #endif
1229
xfrm_offload(struct sk_buff * skb)1230 static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
1231 {
1232 #ifdef CONFIG_XFRM
1233 struct sec_path *sp = skb_sec_path(skb);
1234
1235 if (!sp || !sp->olen || sp->len != sp->olen)
1236 return NULL;
1237
1238 return &sp->ovec[sp->olen - 1];
1239 #else
1240 return NULL;
1241 #endif
1242 }
1243
1244 #ifdef CONFIG_XFRM
1245 int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
1246 unsigned short family);
1247
__xfrm_check_nopolicy(struct net * net,struct sk_buff * skb,int dir)1248 static inline bool __xfrm_check_nopolicy(struct net *net, struct sk_buff *skb,
1249 int dir)
1250 {
1251 if (!net->xfrm.policy_count[dir] && !secpath_exists(skb))
1252 return net->xfrm.policy_default[dir] == XFRM_USERPOLICY_ACCEPT;
1253
1254 return false;
1255 }
1256
__xfrm_check_dev_nopolicy(struct sk_buff * skb,int dir,unsigned short family)1257 static inline bool __xfrm_check_dev_nopolicy(struct sk_buff *skb,
1258 int dir, unsigned short family)
1259 {
1260 if (dir != XFRM_POLICY_OUT && family == AF_INET) {
1261 /* same dst may be used for traffic originating from
1262 * devices with different policy settings.
1263 */
1264 return IPCB(skb)->flags & IPSKB_NOPOLICY;
1265 }
1266 return skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY);
1267 }
1268
__xfrm_policy_check2(struct sock * sk,int dir,struct sk_buff * skb,unsigned int family,int reverse)1269 static inline int __xfrm_policy_check2(struct sock *sk, int dir,
1270 struct sk_buff *skb,
1271 unsigned int family, int reverse)
1272 {
1273 struct net *net = dev_net(skb->dev);
1274 int ndir = dir | (reverse ? XFRM_POLICY_MASK + 1 : 0);
1275 struct xfrm_offload *xo = xfrm_offload(skb);
1276 struct xfrm_state *x;
1277
1278 if (sk && sk->sk_policy[XFRM_POLICY_IN])
1279 return __xfrm_policy_check(sk, ndir, skb, family);
1280
1281 if (xo) {
1282 x = xfrm_input_state(skb);
1283 if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
1284 bool check = (xo->flags & CRYPTO_DONE) &&
1285 (xo->status & CRYPTO_SUCCESS);
1286
1287 /* The packets here are plain ones and secpath was
1288 * needed to indicate that hardware already handled
1289 * them and there is no need to do nothing in addition.
1290 *
1291 * Consume secpath which was set by drivers.
1292 */
1293 secpath_reset(skb);
1294 return check;
1295 }
1296 }
1297
1298 return __xfrm_check_nopolicy(net, skb, dir) ||
1299 __xfrm_check_dev_nopolicy(skb, dir, family) ||
1300 __xfrm_policy_check(sk, ndir, skb, family);
1301 }
1302
xfrm_policy_check(struct sock * sk,int dir,struct sk_buff * skb,unsigned short family)1303 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1304 {
1305 return __xfrm_policy_check2(sk, dir, skb, family, 0);
1306 }
1307
xfrm4_policy_check(struct sock * sk,int dir,struct sk_buff * skb)1308 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1309 {
1310 return xfrm_policy_check(sk, dir, skb, AF_INET);
1311 }
1312
xfrm6_policy_check(struct sock * sk,int dir,struct sk_buff * skb)1313 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1314 {
1315 return xfrm_policy_check(sk, dir, skb, AF_INET6);
1316 }
1317
xfrm4_policy_check_reverse(struct sock * sk,int dir,struct sk_buff * skb)1318 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1319 struct sk_buff *skb)
1320 {
1321 return __xfrm_policy_check2(sk, dir, skb, AF_INET, 1);
1322 }
1323
xfrm6_policy_check_reverse(struct sock * sk,int dir,struct sk_buff * skb)1324 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1325 struct sk_buff *skb)
1326 {
1327 return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
1328 }
1329
int __xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl,
			  unsigned int family, int reverse);

/* Extract @skb's flow key into @fl (forward direction). */
static inline int xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl,
				      unsigned int family)
{
	return __xfrm_decode_session(net, skb, fl, family, 0);
}

/* Extract @skb's flow key into @fl with source/destination reversed. */
static inline int xfrm_decode_session_reverse(struct net *net, struct sk_buff *skb,
					      struct flowi *fl,
					      unsigned int family)
{
	return __xfrm_decode_session(net, skb, fl, family, 1);
}
1345
int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);

/* Re-route a forwarded packet through xfrm.  Fast-accept when no output
 * policies exist and the default output policy is ACCEPT, or when the
 * dst is flagged DST_NOXFRM; otherwise run the full lookup.  Non-zero
 * means forwarding may proceed.
 */
static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct net *net = dev_net(skb->dev);

	if (!net->xfrm.policy_count[XFRM_POLICY_OUT] &&
	    net->xfrm.policy_default[XFRM_POLICY_OUT] == XFRM_USERPOLICY_ACCEPT)
		return true;

	return (skb_dst(skb)->flags & DST_NOXFRM) ||
		__xfrm_route_forward(skb, family);
}

/* AF_INET convenience wrapper. */
static inline int xfrm4_route_forward(struct sk_buff *skb)
{
	return xfrm_route_forward(skb, AF_INET);
}

/* AF_INET6 convenience wrapper. */
static inline int xfrm6_route_forward(struct sk_buff *skb)
{
	return xfrm_route_forward(skb, AF_INET6);
}
1369
int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk);

/* Copy @osk's per-socket policies to the freshly cloned socket @sk.
 * The common no-policy case avoids the out-of-line call entirely.
 * Returns 0 on success or the error from __xfrm_sk_clone_policy().
 */
static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
{
	if (!sk_fullsock(osk))
		return 0;
	sk->sk_policy[0] = NULL;
	sk->sk_policy[1] = NULL;
	if (unlikely(osk->sk_policy[0] || osk->sk_policy[1]))
		return __xfrm_sk_clone_policy(sk, osk);
	return 0;
}
1382
int xfrm_policy_delete(struct xfrm_policy *pol, int dir);

/* Release both per-socket policies (slot 0 and 1).  The direction
 * passed to xfrm_policy_delete() is offset by XFRM_POLICY_MAX to mark
 * them as socket policies.  Caller holds the socket exclusively, hence
 * rcu_dereference_protected(..., 1).
 */
static inline void xfrm_sk_free_policy(struct sock *sk)
{
	struct xfrm_policy *pol;

	pol = rcu_dereference_protected(sk->sk_policy[0], 1);
	if (unlikely(pol != NULL)) {
		xfrm_policy_delete(pol, XFRM_POLICY_MAX);
		sk->sk_policy[0] = NULL;
	}
	pol = rcu_dereference_protected(sk->sk_policy[1], 1);
	if (unlikely(pol != NULL)) {
		xfrm_policy_delete(pol, XFRM_POLICY_MAX+1);
		sk->sk_policy[1] = NULL;
	}
}
1400
1401 #else
1402
xfrm_sk_free_policy(struct sock * sk)1403 static inline void xfrm_sk_free_policy(struct sock *sk) {}
xfrm_sk_clone_policy(struct sock * sk,const struct sock * osk)1404 static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) { return 0; }
xfrm6_route_forward(struct sk_buff * skb)1405 static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
xfrm4_route_forward(struct sk_buff * skb)1406 static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
xfrm6_policy_check(struct sock * sk,int dir,struct sk_buff * skb)1407 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1408 {
1409 return 1;
1410 }
xfrm4_policy_check(struct sock * sk,int dir,struct sk_buff * skb)1411 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1412 {
1413 return 1;
1414 }
xfrm_policy_check(struct sock * sk,int dir,struct sk_buff * skb,unsigned short family)1415 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1416 {
1417 return 1;
1418 }
xfrm_decode_session_reverse(struct net * net,struct sk_buff * skb,struct flowi * fl,unsigned int family)1419 static inline int xfrm_decode_session_reverse(struct net *net, struct sk_buff *skb,
1420 struct flowi *fl,
1421 unsigned int family)
1422 {
1423 return -ENOSYS;
1424 }
xfrm4_policy_check_reverse(struct sock * sk,int dir,struct sk_buff * skb)1425 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1426 struct sk_buff *skb)
1427 {
1428 return 1;
1429 }
xfrm6_policy_check_reverse(struct sock * sk,int dir,struct sk_buff * skb)1430 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1431 struct sk_buff *skb)
1432 {
1433 return 1;
1434 }
1435 #endif
1436
1437 static __inline__
xfrm_flowi_daddr(const struct flowi * fl,unsigned short family)1438 xfrm_address_t *xfrm_flowi_daddr(const struct flowi *fl, unsigned short family)
1439 {
1440 switch (family){
1441 case AF_INET:
1442 return (xfrm_address_t *)&fl->u.ip4.daddr;
1443 case AF_INET6:
1444 return (xfrm_address_t *)&fl->u.ip6.daddr;
1445 }
1446 return NULL;
1447 }
1448
1449 static __inline__
xfrm_flowi_saddr(const struct flowi * fl,unsigned short family)1450 xfrm_address_t *xfrm_flowi_saddr(const struct flowi *fl, unsigned short family)
1451 {
1452 switch (family){
1453 case AF_INET:
1454 return (xfrm_address_t *)&fl->u.ip4.saddr;
1455 case AF_INET6:
1456 return (xfrm_address_t *)&fl->u.ip6.saddr;
1457 }
1458 return NULL;
1459 }
1460
/* Copy @fl's source and destination addresses into @saddr/@daddr.
 * Unknown families leave the outputs untouched.
 */
static __inline__
void xfrm_flowi_addr_get(const struct flowi *fl,
			 xfrm_address_t *saddr, xfrm_address_t *daddr,
			 unsigned short family)
{
	switch(family) {
	case AF_INET:
		memcpy(&saddr->a4, &fl->u.ip4.saddr, sizeof(saddr->a4));
		memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
		break;
	case AF_INET6:
		saddr->in6 = fl->u.ip6.saddr;
		daddr->in6 = fl->u.ip6.daddr;
		break;
	}
}
1477
1478 static __inline__ int
__xfrm4_state_addr_check(const struct xfrm_state * x,const xfrm_address_t * daddr,const xfrm_address_t * saddr)1479 __xfrm4_state_addr_check(const struct xfrm_state *x,
1480 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
1481 {
1482 if (daddr->a4 == x->id.daddr.a4 &&
1483 (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
1484 return 1;
1485 return 0;
1486 }
1487
1488 static __inline__ int
__xfrm6_state_addr_check(const struct xfrm_state * x,const xfrm_address_t * daddr,const xfrm_address_t * saddr)1489 __xfrm6_state_addr_check(const struct xfrm_state *x,
1490 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
1491 {
1492 if (ipv6_addr_equal((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
1493 (ipv6_addr_equal((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr) ||
1494 ipv6_addr_any((struct in6_addr *)saddr) ||
1495 ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
1496 return 1;
1497 return 0;
1498 }
1499
1500 static __inline__ int
xfrm_state_addr_check(const struct xfrm_state * x,const xfrm_address_t * daddr,const xfrm_address_t * saddr,unsigned short family)1501 xfrm_state_addr_check(const struct xfrm_state *x,
1502 const xfrm_address_t *daddr, const xfrm_address_t *saddr,
1503 unsigned short family)
1504 {
1505 switch (family) {
1506 case AF_INET:
1507 return __xfrm4_state_addr_check(x, daddr, saddr);
1508 case AF_INET6:
1509 return __xfrm6_state_addr_check(x, daddr, saddr);
1510 }
1511 return 0;
1512 }
1513
/* Like xfrm_state_addr_check(), but taking the addresses directly from
 * a flow key; unknown families never match.
 */
static __inline__ int
xfrm_state_addr_flow_check(const struct xfrm_state *x, const struct flowi *fl,
			   unsigned short family)
{
	switch (family) {
	case AF_INET:
		return __xfrm4_state_addr_check(x,
						(const xfrm_address_t *)&fl->u.ip4.daddr,
						(const xfrm_address_t *)&fl->u.ip4.saddr);
	case AF_INET6:
		return __xfrm6_state_addr_check(x,
						(const xfrm_address_t *)&fl->u.ip6.daddr,
						(const xfrm_address_t *)&fl->u.ip6.saddr);
	}
	return 0;
}
1530
xfrm_state_kern(const struct xfrm_state * x)1531 static inline int xfrm_state_kern(const struct xfrm_state *x)
1532 {
1533 return atomic_read(&x->tunnel_users);
1534 }
1535
xfrm_id_proto_valid(u8 proto)1536 static inline bool xfrm_id_proto_valid(u8 proto)
1537 {
1538 switch (proto) {
1539 case IPPROTO_AH:
1540 case IPPROTO_ESP:
1541 case IPPROTO_COMP:
1542 #if IS_ENABLED(CONFIG_IPV6)
1543 case IPPROTO_ROUTING:
1544 case IPPROTO_DSTOPTS:
1545 #endif
1546 return true;
1547 default:
1548 return false;
1549 }
1550 }
1551
1552 /* IPSEC_PROTO_ANY only matches 3 IPsec protocols, 0 could match all. */
xfrm_id_proto_match(u8 proto,u8 userproto)1553 static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
1554 {
1555 return (!userproto || proto == userproto ||
1556 (userproto == IPSEC_PROTO_ANY && (proto == IPPROTO_AH ||
1557 proto == IPPROTO_ESP ||
1558 proto == IPPROTO_COMP)));
1559 }
1560
1561 /*
1562 * xfrm algorithm information
1563 */
1564 struct xfrm_algo_aead_info {
1565 char *geniv;
1566 u16 icv_truncbits;
1567 };
1568
1569 struct xfrm_algo_auth_info {
1570 u16 icv_truncbits;
1571 u16 icv_fullbits;
1572 };
1573
1574 struct xfrm_algo_encr_info {
1575 char *geniv;
1576 u16 blockbits;
1577 u16 defkeybits;
1578 };
1579
1580 struct xfrm_algo_comp_info {
1581 u16 threshold;
1582 };
1583
1584 struct xfrm_algo_desc {
1585 char *name;
1586 char *compat;
1587 u8 available:1;
1588 u8 pfkey_supported:1;
1589 union {
1590 struct xfrm_algo_aead_info aead;
1591 struct xfrm_algo_auth_info auth;
1592 struct xfrm_algo_encr_info encr;
1593 struct xfrm_algo_comp_info comp;
1594 } uinfo;
1595 struct sadb_alg desc;
1596 };
1597
/* XFRM protocol handlers. */

/* Per-protocol IPv4 receive hooks, kept in a priority-ordered list. */
struct xfrm4_protocol {
	int (*handler)(struct sk_buff *skb);
	int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
			     int encap_type);
	int (*cb_handler)(struct sk_buff *skb, int err);
	int (*err_handler)(struct sk_buff *skb, u32 info);

	struct xfrm4_protocol __rcu *next;
	int priority;
};

/* Per-protocol IPv6 receive hooks, kept in a priority-ordered list. */
struct xfrm6_protocol {
	int (*handler)(struct sk_buff *skb);
	int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
			     int encap_type);
	int (*cb_handler)(struct sk_buff *skb, int err);
	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
			   u8 type, u8 code, int offset, __be32 info);

	struct xfrm6_protocol __rcu *next;
	int priority;
};

/* XFRM tunnel handlers. */
struct xfrm_tunnel {
	int (*handler)(struct sk_buff *skb);
	int (*cb_handler)(struct sk_buff *skb, int err);
	int (*err_handler)(struct sk_buff *skb, u32 info);

	struct xfrm_tunnel __rcu *next;
	int priority;
};

/* IPv6 tunnel handler variant with the ICMPv6-style error callback. */
struct xfrm6_tunnel {
	int (*handler)(struct sk_buff *skb);
	int (*cb_handler)(struct sk_buff *skb, int err);
	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
			   u8 type, u8 code, int offset, __be32 info);
	struct xfrm6_tunnel __rcu *next;
	int priority;
};
1640
void xfrm_init(void);
void xfrm4_init(void);
int xfrm_state_init(struct net *net);
void xfrm_state_fini(struct net *net);
void xfrm4_state_init(void);
void xfrm4_protocol_init(void);
#ifdef CONFIG_XFRM
int xfrm6_init(void);
void xfrm6_fini(void);
int xfrm6_state_init(void);
void xfrm6_state_fini(void);
int xfrm6_protocol_init(void);
void xfrm6_protocol_fini(void);
#else
/* IPv6 xfrm setup/teardown are no-ops when CONFIG_XFRM is disabled. */
static inline int xfrm6_init(void)
{
	return 0;
}
static inline void xfrm6_fini(void)
{
	;
}
#endif
1664
#ifdef CONFIG_XFRM_STATISTICS
int xfrm_proc_init(struct net *net);
void xfrm_proc_fini(struct net *net);
#endif

int xfrm_sysctl_init(struct net *net);
#ifdef CONFIG_SYSCTL
void xfrm_sysctl_fini(struct net *net);
#else
/* No sysctl support: nothing to tear down. */
static inline void xfrm_sysctl_fini(struct net *net)
{
}
#endif
1678
1679 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
1680 struct xfrm_address_filter *filter);
1681 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
1682 int (*func)(struct xfrm_state *, int, void*), void *);
1683 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
1684 struct xfrm_state *xfrm_state_alloc(struct net *net);
1685 void xfrm_state_free(struct xfrm_state *x);
1686 struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
1687 const xfrm_address_t *saddr,
1688 const struct flowi *fl,
1689 struct xfrm_tmpl *tmpl,
1690 struct xfrm_policy *pol, int *err,
1691 unsigned short family, u32 if_id);
1692 struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
1693 xfrm_address_t *daddr,
1694 xfrm_address_t *saddr,
1695 unsigned short family,
1696 u8 mode, u8 proto, u32 reqid);
1697 struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
1698 unsigned short family);
1699 int xfrm_state_check_expire(struct xfrm_state *x);
1700 void xfrm_state_update_stats(struct net *net);
1701 #ifdef CONFIG_XFRM_OFFLOAD
xfrm_dev_state_update_stats(struct xfrm_state * x)1702 static inline void xfrm_dev_state_update_stats(struct xfrm_state *x)
1703 {
1704 struct xfrm_dev_offload *xdo = &x->xso;
1705 struct net_device *dev = READ_ONCE(xdo->dev);
1706
1707 if (dev && dev->xfrmdev_ops &&
1708 dev->xfrmdev_ops->xdo_dev_state_update_stats)
1709 dev->xfrmdev_ops->xdo_dev_state_update_stats(x);
1710
1711 }
1712 #else
xfrm_dev_state_update_stats(struct xfrm_state * x)1713 static inline void xfrm_dev_state_update_stats(struct xfrm_state *x) {}
1714 #endif
1715 void xfrm_state_insert(struct xfrm_state *x);
1716 int xfrm_state_add(struct xfrm_state *x);
1717 int xfrm_state_update(struct xfrm_state *x);
1718 struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
1719 const xfrm_address_t *daddr, __be32 spi,
1720 u8 proto, unsigned short family);
1721 struct xfrm_state *xfrm_input_state_lookup(struct net *net, u32 mark,
1722 const xfrm_address_t *daddr,
1723 __be32 spi, u8 proto,
1724 unsigned short family);
1725 struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
1726 const xfrm_address_t *daddr,
1727 const xfrm_address_t *saddr,
1728 u8 proto,
1729 unsigned short family);
#ifdef CONFIG_XFRM_SUB_POLICY
void xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
		    unsigned short family);
void xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
		     unsigned short family);
#else
/* Without sub-policies, template/state ordering is irrelevant. */
static inline void xfrm_tmpl_sort(struct xfrm_tmpl **d, struct xfrm_tmpl **s,
				  int n, unsigned short family)
{
}

static inline void xfrm_state_sort(struct xfrm_state **d, struct xfrm_state **s,
				   int n, unsigned short family)
{
}
#endif
1746
/* SAD (security association database) accounting, reported to userspace. */
struct xfrmk_sadinfo {
	u32 sadhcnt; /* current hash bkts */
	u32 sadhmcnt; /* max allowed hash bkts */
	u32 sadcnt; /* current running count */
};

/* SPD (security policy database) accounting, reported to userspace.
 * NOTE(review): *cnt appear to be per-direction policy counts and *scnt
 * their socket-policy counterparts — confirm against xfrm_spd_getinfo().
 */
struct xfrmk_spdinfo {
	u32 incnt;
	u32 outcnt;
	u32 fwdcnt;
	u32 inscnt;
	u32 outscnt;
	u32 fwdscnt;
	u32 spdhcnt;	/* current policy hash buckets */
	u32 spdhmcnt;	/* max allowed policy hash buckets */
};
1763
1764 struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num);
1765 int xfrm_state_delete(struct xfrm_state *x);
1766 int xfrm_state_flush(struct net *net, u8 proto, bool task_valid);
1767 int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
1768 int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
1769 bool task_valid);
1770 void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
1771 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
1772 u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
1773 int xfrm_init_replay(struct xfrm_state *x, struct netlink_ext_ack *extack);
1774 u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
1775 int __xfrm_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack);
1776 int xfrm_init_state(struct xfrm_state *x);
1777 int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
1778 int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
1779 int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
1780 int (*finish)(struct net *, struct sock *,
1781 struct sk_buff *));
1782 int xfrm_trans_queue(struct sk_buff *skb,
1783 int (*finish)(struct net *, struct sock *,
1784 struct sk_buff *));
1785 int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err);
1786 int xfrm_output(struct sock *sk, struct sk_buff *skb);
1787 int xfrm4_tunnel_check_size(struct sk_buff *skb);
#if IS_ENABLED(CONFIG_IPV6)
int xfrm6_tunnel_check_size(struct sk_buff *skb);
#else
/* IPv6 disabled: report any IPv6 tunnel packet as oversized. */
static inline int xfrm6_tunnel_check_size(struct sk_buff *skb)
{
	return -EMSGSIZE;
}
#endif
1796
1797 #if IS_ENABLED(CONFIG_NET_PKTGEN)
1798 int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb);
1799 #endif
1800
1801 void xfrm_local_error(struct sk_buff *skb, int mtu);
1802 int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
1803 int encap_type);
1804 int xfrm4_transport_finish(struct sk_buff *skb, int async);
1805 int xfrm4_rcv(struct sk_buff *skb);
1806
xfrm4_rcv_spi(struct sk_buff * skb,int nexthdr,__be32 spi)1807 static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
1808 {
1809 XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
1810 XFRM_SPI_SKB_CB(skb)->family = AF_INET;
1811 XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
1812 return xfrm_input(skb, nexthdr, spi, 0);
1813 }
1814
1815 int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb);
1816 int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
1817 int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol);
1818 int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
1819 int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
1820 void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
1821 int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
1822 struct ip6_tnl *t);
1823 int xfrm6_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
1824 int encap_type);
1825 int xfrm6_transport_finish(struct sk_buff *skb, int async);
1826 int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t);
1827 int xfrm6_rcv(struct sk_buff *skb);
1828 int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
1829 xfrm_address_t *saddr, u8 proto);
1830 void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
1831 int xfrm6_protocol_register(struct xfrm6_protocol *handler, unsigned char protocol);
1832 int xfrm6_protocol_deregister(struct xfrm6_protocol *handler, unsigned char protocol);
1833 int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
1834 int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
1835 __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
1836 __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
1837 int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
1838
1839 #ifdef CONFIG_XFRM
1840 void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu);
1841 int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
1842 int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
1843 struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
1844 struct sk_buff *skb);
1845 struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
1846 struct sk_buff *skb);
1847 int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval,
1848 int optlen);
1849 #else
xfrm_user_policy(struct sock * sk,int optname,sockptr_t optval,int optlen)1850 static inline int xfrm_user_policy(struct sock *sk, int optname,
1851 sockptr_t optval, int optlen)
1852 {
1853 return -ENOPROTOOPT;
1854 }
1855 #endif
1856
1857 struct dst_entry *__xfrm_dst_lookup(int family, const struct xfrm_dst_lookup_params *params);
1858
1859 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);
1860
1861 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
1862 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1863 int (*func)(struct xfrm_policy *, int, int, void*),
1864 void *);
1865 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
1866 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
1867 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net,
1868 const struct xfrm_mark *mark,
1869 u32 if_id, u8 type, int dir,
1870 struct xfrm_selector *sel,
1871 struct xfrm_sec_ctx *ctx, int delete,
1872 int *err);
1873 struct xfrm_policy *xfrm_policy_byid(struct net *net,
1874 const struct xfrm_mark *mark, u32 if_id,
1875 u8 type, int dir, u32 id, int delete,
1876 int *err);
1877 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
1878 void xfrm_policy_hash_rebuild(struct net *net);
1879 u32 xfrm_get_acqseq(void);
1880 int verify_spi_info(u8 proto, u32 min, u32 max, struct netlink_ext_ack *extack);
1881 int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi,
1882 struct netlink_ext_ack *extack);
1883 struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
1884 u8 mode, u32 reqid, u32 if_id, u32 pcpu_num, u8 proto,
1885 const xfrm_address_t *daddr,
1886 const xfrm_address_t *saddr, int create,
1887 unsigned short family);
1888 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
1889
1890 #ifdef CONFIG_XFRM_MIGRATE
1891 int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1892 const struct xfrm_migrate *m, int num_bundles,
1893 const struct xfrm_kmaddress *k,
1894 const struct xfrm_encap_tmpl *encap);
1895 struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net,
1896 u32 if_id);
1897 struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
1898 struct xfrm_migrate *m,
1899 struct xfrm_encap_tmpl *encap,
1900 struct net *net,
1901 struct xfrm_user_offload *xuo,
1902 struct netlink_ext_ack *extack);
1903 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1904 struct xfrm_migrate *m, int num_bundles,
1905 struct xfrm_kmaddress *k, struct net *net,
1906 struct xfrm_encap_tmpl *encap, u32 if_id,
1907 struct netlink_ext_ack *extack,
1908 struct xfrm_user_offload *xuo);
1909 #endif
1910
1911 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
1912 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid);
1913 int km_report(struct net *net, u8 proto, struct xfrm_selector *sel,
1914 xfrm_address_t *addr);
1915
1916 void xfrm_input_init(void);
1917 int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
1918
1919 void xfrm_probe_algs(void);
1920 int xfrm_count_pfkey_auth_supported(void);
1921 int xfrm_count_pfkey_enc_supported(void);
1922 struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
1923 struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
1924 struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
1925 struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
1926 struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
1927 struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
1928 struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
1929 struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
1930 struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
1931 int probe);
1932
xfrm6_addr_equal(const xfrm_address_t * a,const xfrm_address_t * b)1933 static inline bool xfrm6_addr_equal(const xfrm_address_t *a,
1934 const xfrm_address_t *b)
1935 {
1936 return ipv6_addr_equal((const struct in6_addr *)a,
1937 (const struct in6_addr *)b);
1938 }
1939
xfrm_addr_equal(const xfrm_address_t * a,const xfrm_address_t * b,sa_family_t family)1940 static inline bool xfrm_addr_equal(const xfrm_address_t *a,
1941 const xfrm_address_t *b,
1942 sa_family_t family)
1943 {
1944 switch (family) {
1945 default:
1946 case AF_INET:
1947 return ((__force u32)a->a4 ^ (__force u32)b->a4) == 0;
1948 case AF_INET6:
1949 return xfrm6_addr_equal(a, b);
1950 }
1951 }
1952
/* The low three bits of a policy index encode its direction. */
static inline int xfrm_policy_id2dir(u32 index)
{
	return index % 8;
}
1957
1958 #ifdef CONFIG_XFRM
1959 void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq);
1960 int xfrm_replay_check(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);
1961 void xfrm_replay_notify(struct xfrm_state *x, int event);
1962 int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb);
1963 int xfrm_replay_recheck(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);
1964
xfrm_aevent_is_on(struct net * net)1965 static inline int xfrm_aevent_is_on(struct net *net)
1966 {
1967 struct sock *nlsk;
1968 int ret = 0;
1969
1970 rcu_read_lock();
1971 nlsk = rcu_dereference(net->xfrm.nlsk);
1972 if (nlsk)
1973 ret = netlink_has_listeners(nlsk, XFRMNLGRP_AEVENTS);
1974 rcu_read_unlock();
1975 return ret;
1976 }
1977
xfrm_acquire_is_on(struct net * net)1978 static inline int xfrm_acquire_is_on(struct net *net)
1979 {
1980 struct sock *nlsk;
1981 int ret = 0;
1982
1983 rcu_read_lock();
1984 nlsk = rcu_dereference(net->xfrm.nlsk);
1985 if (nlsk)
1986 ret = netlink_has_listeners(nlsk, XFRMNLGRP_ACQUIRE);
1987 rcu_read_unlock();
1988
1989 return ret;
1990 }
1991 #endif
1992
aead_len(struct xfrm_algo_aead * alg)1993 static inline unsigned int aead_len(struct xfrm_algo_aead *alg)
1994 {
1995 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1996 }
1997
xfrm_alg_len(const struct xfrm_algo * alg)1998 static inline unsigned int xfrm_alg_len(const struct xfrm_algo *alg)
1999 {
2000 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
2001 }
2002
xfrm_alg_auth_len(const struct xfrm_algo_auth * alg)2003 static inline unsigned int xfrm_alg_auth_len(const struct xfrm_algo_auth *alg)
2004 {
2005 return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
2006 }
2007
xfrm_replay_state_esn_len(struct xfrm_replay_state_esn * replay_esn)2008 static inline unsigned int xfrm_replay_state_esn_len(struct xfrm_replay_state_esn *replay_esn)
2009 {
2010 return sizeof(*replay_esn) + replay_esn->bmp_len * sizeof(__u32);
2011 }
2012
2013 #ifdef CONFIG_XFRM_MIGRATE
xfrm_replay_clone(struct xfrm_state * x,struct xfrm_state * orig)2014 static inline int xfrm_replay_clone(struct xfrm_state *x,
2015 struct xfrm_state *orig)
2016 {
2017
2018 x->replay_esn = kmemdup(orig->replay_esn,
2019 xfrm_replay_state_esn_len(orig->replay_esn),
2020 GFP_KERNEL);
2021 if (!x->replay_esn)
2022 return -ENOMEM;
2023 x->preplay_esn = kmemdup(orig->preplay_esn,
2024 xfrm_replay_state_esn_len(orig->preplay_esn),
2025 GFP_KERNEL);
2026 if (!x->preplay_esn)
2027 return -ENOMEM;
2028
2029 return 0;
2030 }
2031
xfrm_algo_aead_clone(struct xfrm_algo_aead * orig)2032 static inline struct xfrm_algo_aead *xfrm_algo_aead_clone(struct xfrm_algo_aead *orig)
2033 {
2034 return kmemdup(orig, aead_len(orig), GFP_KERNEL);
2035 }
2036
2037
xfrm_algo_clone(struct xfrm_algo * orig)2038 static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
2039 {
2040 return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
2041 }
2042
xfrm_algo_auth_clone(struct xfrm_algo_auth * orig)2043 static inline struct xfrm_algo_auth *xfrm_algo_auth_clone(struct xfrm_algo_auth *orig)
2044 {
2045 return kmemdup(orig, xfrm_alg_auth_len(orig), GFP_KERNEL);
2046 }
2047
/* Drop one reference on each of the first @n states in @states. */
static inline void xfrm_states_put(struct xfrm_state **states, int n)
{
	int i;

	for (i = 0; i < n; i++)
		xfrm_state_put(states[i]);
}
2054
/* Delete each of the first @n states in @states. */
static inline void xfrm_states_delete(struct xfrm_state **states, int n)
{
	int i;

	for (i = 0; i < n; i++)
		xfrm_state_delete(states[i]);
}
2061 #endif
2062
2063 void __init xfrm_dev_init(void);
2064
2065 #ifdef CONFIG_XFRM_OFFLOAD
2066 void xfrm_dev_resume(struct sk_buff *skb);
2067 void xfrm_dev_backlog(struct softnet_data *sd);
2068 struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
2069 int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
2070 struct xfrm_user_offload *xuo,
2071 struct netlink_ext_ack *extack);
2072 int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
2073 struct xfrm_user_offload *xuo, u8 dir,
2074 struct netlink_ext_ack *extack);
2075 bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
2076 void xfrm_dev_state_delete(struct xfrm_state *x);
2077 void xfrm_dev_state_free(struct xfrm_state *x);
2078
xfrm_dev_state_advance_esn(struct xfrm_state * x)2079 static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
2080 {
2081 struct xfrm_dev_offload *xso = &x->xso;
2082 struct net_device *dev = READ_ONCE(xso->dev);
2083
2084 if (dev && dev->xfrmdev_ops->xdo_dev_state_advance_esn)
2085 dev->xfrmdev_ops->xdo_dev_state_advance_esn(x);
2086 }
2087
xfrm_dst_offload_ok(struct dst_entry * dst)2088 static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
2089 {
2090 struct xfrm_state *x = dst->xfrm;
2091 struct xfrm_dst *xdst;
2092
2093 if (!x || !x->type_offload)
2094 return false;
2095
2096 xdst = (struct xfrm_dst *) dst;
2097 if (!x->xso.offload_handle && !xdst->child->xfrm)
2098 return true;
2099 if (x->xso.offload_handle && (x->xso.dev == xfrm_dst_path(dst)->dev) &&
2100 !xdst->child->xfrm)
2101 return true;
2102
2103 return false;
2104 }
2105
xfrm_dev_policy_delete(struct xfrm_policy * x)2106 static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
2107 {
2108 struct xfrm_dev_offload *xdo = &x->xdo;
2109 struct net_device *dev = xdo->dev;
2110
2111 if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_policy_delete)
2112 dev->xfrmdev_ops->xdo_dev_policy_delete(x);
2113 }
2114
xfrm_dev_policy_free(struct xfrm_policy * x)2115 static inline void xfrm_dev_policy_free(struct xfrm_policy *x)
2116 {
2117 struct xfrm_dev_offload *xdo = &x->xdo;
2118 struct net_device *dev = xdo->dev;
2119
2120 if (dev && dev->xfrmdev_ops) {
2121 if (dev->xfrmdev_ops->xdo_dev_policy_free)
2122 dev->xfrmdev_ops->xdo_dev_policy_free(x);
2123 xdo->dev = NULL;
2124 netdev_put(dev, &xdo->dev_tracker);
2125 }
2126 }
2127 #else
/* CONFIG_XFRM_OFFLOAD=n stub: nothing to resume. */
static inline void xfrm_dev_resume(struct sk_buff *skb)
{
}
2131
/* CONFIG_XFRM_OFFLOAD=n stub: no offload backlog to process. */
static inline void xfrm_dev_backlog(struct softnet_data *sd)
{
}
2135
/* CONFIG_XFRM_OFFLOAD=n stub: pass the skb through unchanged. */
static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
	return skb;
}
2140
/* CONFIG_XFRM_OFFLOAD=n stub: report success; no hardware state is set up. */
static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo, struct netlink_ext_ack *extack)
{
	return 0;
}
2145
/* CONFIG_XFRM_OFFLOAD=n stub: no hardware state to delete. */
static inline void xfrm_dev_state_delete(struct xfrm_state *x)
{
}
2149
/* CONFIG_XFRM_OFFLOAD=n stub: no hardware state to free. */
static inline void xfrm_dev_state_free(struct xfrm_state *x)
{
}
2153
/* CONFIG_XFRM_OFFLOAD=n stub: report success; no hardware policy is set up. */
static inline int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
				      struct xfrm_user_offload *xuo, u8 dir,
				      struct netlink_ext_ack *extack)
{
	return 0;
}
2160
/* CONFIG_XFRM_OFFLOAD=n stub: no hardware policy to delete. */
static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
{
}
2164
/* CONFIG_XFRM_OFFLOAD=n stub: no hardware policy to free. */
static inline void xfrm_dev_policy_free(struct xfrm_policy *x)
{
}
2168
/* CONFIG_XFRM_OFFLOAD=n stub: offload is never possible. */
static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	return false;
}
2173
/* CONFIG_XFRM_OFFLOAD=n stub: no device to notify of ESN advance. */
static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
{
}
2177
/* CONFIG_XFRM_OFFLOAD=n stub: dst offload is never possible. */
static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
{
	return false;
}
2182 #endif
2183
xfrm_mark_get(struct nlattr ** attrs,struct xfrm_mark * m)2184 static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
2185 {
2186 if (attrs[XFRMA_MARK])
2187 memcpy(m, nla_data(attrs[XFRMA_MARK]), sizeof(struct xfrm_mark));
2188 else
2189 m->v = m->m = 0;
2190
2191 return m->v & m->m;
2192 }
2193
xfrm_mark_put(struct sk_buff * skb,const struct xfrm_mark * m)2194 static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
2195 {
2196 int ret = 0;
2197
2198 if (m->m | m->v)
2199 ret = nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
2200 return ret;
2201 }
2202
/* Combine @mark with the state's smark: masked bits come from the
 * state's value, the remaining bits from @mark.
 */
static inline __u32 xfrm_smark_get(__u32 mark, struct xfrm_state *x)
{
	const struct xfrm_mark *sm = &x->props.smark;

	return (sm->v & sm->m) | (mark & ~sm->m);
}
2209
/* Emit an XFRMA_IF_ID attribute when @if_id is nonzero.
 * Returns 0 on success or when nothing needs to be emitted.
 */
static inline int xfrm_if_id_put(struct sk_buff *skb, __u32 if_id)
{
	if (!if_id)
		return 0;

	return nla_put_u32(skb, XFRMA_IF_ID, if_id);
}
2218
xfrm_tunnel_check(struct sk_buff * skb,struct xfrm_state * x,unsigned int family)2219 static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
2220 unsigned int family)
2221 {
2222 bool tunnel = false;
2223
2224 switch(family) {
2225 case AF_INET:
2226 if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
2227 tunnel = true;
2228 break;
2229 case AF_INET6:
2230 if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
2231 tunnel = true;
2232 break;
2233 }
2234 if (tunnel && !(x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL))
2235 return -EINVAL;
2236
2237 return 0;
2238 }
2239
2240 extern const int xfrm_msg_min[XFRM_NR_MSGTYPES];
2241 extern const struct nla_policy xfrma_policy[XFRMA_MAX+1];
2242
/* Hooks used by xfrm_user to translate between the 32-bit (compat) and
 * 64-bit netlink message layouts; provided by the xfrm_compat module.
 */
struct xfrm_translator {
	/* Allocate frag_list and put compat translation there */
	int (*alloc_compat)(struct sk_buff *skb, const struct nlmsghdr *src);

	/* Allocate nlmsg with 64-bit translation of received 32-bit message */
	struct nlmsghdr *(*rcv_msg_compat)(const struct nlmsghdr *nlh,
			int maxtype, const struct nla_policy *policy,
			struct netlink_ext_ack *extack);

	/* Translate 32-bit user_policy from sockptr */
	int (*xlate_user_policy_sockptr)(u8 **pdata32, int optlen);

	/* Module providing these hooks; pinned while the translator is held */
	struct module *owner;
};
2257
2258 #if IS_ENABLED(CONFIG_XFRM_USER_COMPAT)
2259 extern int xfrm_register_translator(struct xfrm_translator *xtr);
2260 extern int xfrm_unregister_translator(struct xfrm_translator *xtr);
2261 extern struct xfrm_translator *xfrm_get_translator(void);
2262 extern void xfrm_put_translator(struct xfrm_translator *xtr);
2263 #else
/* CONFIG_XFRM_USER_COMPAT=n stub: no compat translator available. */
static inline struct xfrm_translator *xfrm_get_translator(void)
{
	return NULL;
}
/* CONFIG_XFRM_USER_COMPAT=n stub: nothing was acquired, nothing to drop. */
static inline void xfrm_put_translator(struct xfrm_translator *xtr)
{
}
2271 #endif
2272
2273 #if IS_ENABLED(CONFIG_IPV6)
xfrm6_local_dontfrag(const struct sock * sk)2274 static inline bool xfrm6_local_dontfrag(const struct sock *sk)
2275 {
2276 int proto;
2277
2278 if (!sk || sk->sk_family != AF_INET6)
2279 return false;
2280
2281 proto = sk->sk_protocol;
2282 if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
2283 return inet6_test_bit(DONTFRAG, sk);
2284
2285 return false;
2286 }
2287 #endif
2288
2289 #if (IS_BUILTIN(CONFIG_XFRM_INTERFACE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \
2290 (IS_MODULE(CONFIG_XFRM_INTERFACE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
2291
2292 extern struct metadata_dst __percpu *xfrm_bpf_md_dst;
2293
2294 int register_xfrm_interface_bpf(void);
2295
2296 #else
2297
/* No BTF-enabled xfrm interface: registration is a successful no-op. */
static inline int register_xfrm_interface_bpf(void)
{
	return 0;
}
2302
2303 #endif
2304
2305 #if IS_ENABLED(CONFIG_DEBUG_INFO_BTF)
2306 int register_xfrm_state_bpf(void);
2307 #else
/* CONFIG_DEBUG_INFO_BTF=n stub: registration is a successful no-op. */
static inline int register_xfrm_state_bpf(void)
{
	return 0;
}
2312 #endif
2313
2314 int xfrm_nat_keepalive_init(unsigned short family);
2315 void xfrm_nat_keepalive_fini(unsigned short family);
2316 int xfrm_nat_keepalive_net_init(struct net *net);
2317 int xfrm_nat_keepalive_net_fini(struct net *net);
2318 void xfrm_nat_keepalive_state_updated(struct xfrm_state *x);
2319
2320 #endif /* _NET_XFRM_H */
2321