1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _NET_XFRM_H
3 #define _NET_XFRM_H
4 
5 #include <linux/compiler.h>
6 #include <linux/xfrm.h>
7 #include <linux/spinlock.h>
8 #include <linux/list.h>
9 #include <linux/skbuff.h>
10 #include <linux/socket.h>
11 #include <linux/pfkeyv2.h>
12 #include <linux/ipsec.h>
13 #include <linux/in6.h>
14 #include <linux/mutex.h>
15 #include <linux/audit.h>
16 #include <linux/slab.h>
17 #include <linux/refcount.h>
18 #include <linux/sockptr.h>
19 
20 #include <net/sock.h>
21 #include <net/dst.h>
22 #include <net/ip.h>
23 #include <net/route.h>
24 #include <net/ipv6.h>
25 #include <net/ip6_fib.h>
26 #include <net/flow.h>
27 #include <net/gro_cells.h>
28 
29 #include <linux/interrupt.h>
30 
31 #ifdef CONFIG_XFRM_STATISTICS
32 #include <net/snmp.h>
33 #endif
34 
35 #define XFRM_PROTO_ESP		50
36 #define XFRM_PROTO_AH		51
37 #define XFRM_PROTO_COMP		108
38 #define XFRM_PROTO_IPIP		4
39 #define XFRM_PROTO_IPV6		41
40 #define XFRM_PROTO_ROUTING	IPPROTO_ROUTING
41 #define XFRM_PROTO_DSTOPTS	IPPROTO_DSTOPTS
42 
43 #define XFRM_ALIGN4(len)	(((len) + 3) & ~3)
44 #define XFRM_ALIGN8(len)	(((len) + 7) & ~7)
45 #define MODULE_ALIAS_XFRM_MODE(family, encap) \
46 	MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
47 #define MODULE_ALIAS_XFRM_TYPE(family, proto) \
48 	MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto))
49 #define MODULE_ALIAS_XFRM_OFFLOAD_TYPE(family, proto) \
50 	MODULE_ALIAS("xfrm-offload-" __stringify(family) "-" __stringify(proto))
51 
52 #ifdef CONFIG_XFRM_STATISTICS
53 #define XFRM_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
54 #else
55 #define XFRM_INC_STATS(net, field)	((void)(net))
56 #endif
57 
58 
59 /* Organization of SPD aka "XFRM rules"
60    ------------------------------------
61 
62    Basic objects:
63    - policy rule, struct xfrm_policy (=SPD entry)
64    - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
65    - instance of a transformer, struct xfrm_state (=SA)
66    - template to clone xfrm_state, struct xfrm_tmpl
67 
68    The SPD is a plain linear list of xfrm_policy rules, ordered by priority.
69    (To be compatible with existing pfkeyv2 implementations,
70    many rules with a priority of 0x7fffffff are allowed to exist, and
71    such rules are ordered in an unpredictable way, thanks to the bsd folks.)
72 
73    Lookup is a plain linear search until the first match with the selector.
74 
75    If "action" is "block", we prohibit the flow; otherwise:
76    if "xfrm_nr" is zero, the flow passes untransformed.  Otherwise, the
77    policy entry has a list of up to XFRM_MAX_DEPTH transformations,
78    described by xfrm_tmpl templates.  Each template is resolved to a
79    complete xfrm_state (see below) and we pack the bundle of transformations
80    into a dst_entry returned to the requestor.
81 
82    dst -. xfrm  .-> xfrm_state #1
83     |---. child .-> dst -. xfrm .-> xfrm_state #2
84                      |---. child .-> dst -. xfrm .-> xfrm_state #3
85                                       |---. child .-> NULL
86 
87    Bundles are cached at the xfrm_policy struct (field ->bundles).
88 
89 
90    Resolution of xfrm_tmpl
91    -----------------------
92    Template contains:
93    1. ->mode		Mode: transport or tunnel
94    2. ->id.proto	Protocol: AH/ESP/IPCOMP
95    3. ->id.daddr	Remote tunnel endpoint, ignored for transport mode.
96       Q: allow to resolve security gateway?
97    4. ->id.spi          If not zero, static SPI.
98    5. ->saddr		Local tunnel endpoint, ignored for transport mode.
99    6. ->algos		List of allowed algos. Plain bitmask now.
100       Q: ealgos, aalgos, calgos. What a mess...
101    7. ->share		Sharing mode.
102       Q: how to implement private sharing mode? To add struct sock* to
103       flow id?
104 
105    Given this template, we search through the SAD for entries with the
106    appropriate mode/proto/algo, permitted by the selector.  If no
107    appropriate entry is found, one is requested from the key manager.
108 
109    PROBLEMS:
110    Q: How do we find all the bundles referring to a physical path for
111       PMTU discovery? It seems dst would have to contain a list of all
112       parents... and we would enter an infinite locking-hierarchy disaster.
113       No! It is easier: we will not search for them, we let them find us.
114       We add a genid to each dst plus a pointer to the genid of the raw IP
115       route; PMTU discovery will update the PMTU on the raw route and bump
116       its genid.  dst_check() will see this at the top level and trigger a
117       resync of the metrics.  Plus, it is done via sk->sk_dst_cache.  Solved.
118  */
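
/*
 * Illustrative sketch (not part of this header's API): walking a resolved
 * bundle from the top-level dst down to the raw route, following the child
 * chain pictured above.  Each level's dst->xfrm is the SA applied at that
 * layer; the chain ends at the raw route, whose child is NULL.
 * xfrm_dst_child() is declared later in this file.
 *
 *	static void example_dump_bundle(struct dst_entry *dst)
 *	{
 *		while (dst) {
 *			struct xfrm_state *x = dst->xfrm;
 *
 *			if (x)
 *				pr_debug("SA proto %u spi 0x%08x\n",
 *					 x->id.proto, ntohl(x->id.spi));
 *			dst = xfrm_dst_child(dst);
 *		}
 *	}
 */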
119 
120 struct xfrm_state_walk {
121 	struct list_head	all;
122 	u8			state;
123 	u8			dying;
124 	u8			proto;
125 	u32			seq;
126 	struct xfrm_address_filter *filter;
127 };
128 
129 struct xfrm_state_offload {
130 	struct net_device	*dev;
131 	netdevice_tracker	dev_tracker;
132 	struct net_device	*real_dev;
133 	unsigned long		offload_handle;
134 	unsigned int		num_exthdrs;
135 	u8			flags;
136 };
137 
138 struct xfrm_mode {
139 	u8 encap;
140 	u8 family;
141 	u8 flags;
142 };
143 
144 /* Flags for xfrm_mode. */
145 enum {
146 	XFRM_MODE_FLAG_TUNNEL = 1,
147 };
148 
149 enum xfrm_replay_mode {
150 	XFRM_REPLAY_MODE_LEGACY,
151 	XFRM_REPLAY_MODE_BMP,
152 	XFRM_REPLAY_MODE_ESN,
153 };
154 
155 /* Full description of state of transformer. */
156 struct xfrm_state {
157 	possible_net_t		xs_net;
158 	union {
159 		struct hlist_node	gclist;
160 		struct hlist_node	bydst;
161 	};
162 	struct hlist_node	bysrc;
163 	struct hlist_node	byspi;
164 	struct hlist_node	byseq;
165 
166 	refcount_t		refcnt;
167 	spinlock_t		lock;
168 
169 	struct xfrm_id		id;
170 	struct xfrm_selector	sel;
171 	struct xfrm_mark	mark;
172 	u32			if_id;
173 	u32			tfcpad;
174 
175 	u32			genid;
176 
177 	/* Key manager bits */
178 	struct xfrm_state_walk	km;
179 
180 	/* Parameters of this state. */
181 	struct {
182 		u32		reqid;
183 		u8		mode;
184 		u8		replay_window;
185 		u8		aalgo, ealgo, calgo;
186 		u8		flags;
187 		u16		family;
188 		xfrm_address_t	saddr;
189 		int		header_len;
190 		int		trailer_len;
191 		u32		extra_flags;
192 		struct xfrm_mark	smark;
193 	} props;
194 
195 	struct xfrm_lifetime_cfg lft;
196 
197 	/* Data for transformer */
198 	struct xfrm_algo_auth	*aalg;
199 	struct xfrm_algo	*ealg;
200 	struct xfrm_algo	*calg;
201 	struct xfrm_algo_aead	*aead;
202 	const char		*geniv;
203 
204 	/* Data for encapsulator */
205 	struct xfrm_encap_tmpl	*encap;
206 	struct sock __rcu	*encap_sk;
207 
208 	/* Data for care-of address */
209 	xfrm_address_t	*coaddr;
210 
211 	/* IPComp needs an IPIP tunnel for handling uncompressed packets */
212 	struct xfrm_state	*tunnel;
213 
214 	/* If a tunnel, number of users + 1 */
215 	atomic_t		tunnel_users;
216 
217 	/* State for replay detection */
218 	struct xfrm_replay_state replay;
219 	struct xfrm_replay_state_esn *replay_esn;
220 
221 	/* Replay detection state at the time we sent the last notification */
222 	struct xfrm_replay_state preplay;
223 	struct xfrm_replay_state_esn *preplay_esn;
224 
225 	/* replay detection mode */
226 	enum xfrm_replay_mode    repl_mode;
227 	/* Internal flag that currently only holds state for the delayed
228 	 * aevent.
229 	 */
230 	u32			xflags;
231 
232 	/* Replay detection notification settings */
233 	u32			replay_maxage;
234 	u32			replay_maxdiff;
235 
236 	/* Replay detection notification timer */
237 	struct timer_list	rtimer;
238 
239 	/* Statistics */
240 	struct xfrm_stats	stats;
241 
242 	struct xfrm_lifetime_cur curlft;
243 	struct hrtimer		mtimer;
244 
245 	struct xfrm_state_offload xso;
246 
247 	/* used to fix curlft->add_time when changing date */
248 	long		saved_tmo;
249 
250 	/* Last used time */
251 	time64_t		lastused;
252 
253 	struct page_frag xfrag;
254 
255 	/* Reference to data common to all the instances of this
256 	 * transformer. */
257 	const struct xfrm_type	*type;
258 	struct xfrm_mode	inner_mode;
259 	struct xfrm_mode	inner_mode_iaf;
260 	struct xfrm_mode	outer_mode;
261 
262 	const struct xfrm_type_offload	*type_offload;
263 
264 	/* Security context */
265 	struct xfrm_sec_ctx	*security;
266 
267 	/* Private data of this transformer, format is opaque,
268 	 * interpreted by xfrm_type methods. */
269 	void			*data;
270 };
271 
272 static inline struct net *xs_net(struct xfrm_state *x)
273 {
274 	return read_pnet(&x->xs_net);
275 }
276 
277 /* xflags - make enum if more show up */
278 #define XFRM_TIME_DEFER	1
279 #define XFRM_SOFT_EXPIRE 2
280 
281 enum {
282 	XFRM_STATE_VOID,
283 	XFRM_STATE_ACQ,
284 	XFRM_STATE_VALID,
285 	XFRM_STATE_ERROR,
286 	XFRM_STATE_EXPIRED,
287 	XFRM_STATE_DEAD
288 };
289 
290 /* callback structure passed from either netlink or pfkey */
291 struct km_event {
292 	union {
293 		u32 hard;
294 		u32 proto;
295 		u32 byid;
296 		u32 aevent;
297 		u32 type;
298 	} data;
299 
300 	u32	seq;
301 	u32	portid;
302 	u32	event;
303 	struct net *net;
304 };
305 
306 struct xfrm_if_cb {
307 	struct xfrm_if	*(*decode_session)(struct sk_buff *skb,
308 					   unsigned short family);
309 };
310 
311 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
312 void xfrm_if_unregister_cb(void);
313 
314 struct net_device;
315 struct xfrm_type;
316 struct xfrm_dst;
317 struct xfrm_policy_afinfo {
318 	struct dst_ops		*dst_ops;
319 	struct dst_entry	*(*dst_lookup)(struct net *net,
320 					       int tos, int oif,
321 					       const xfrm_address_t *saddr,
322 					       const xfrm_address_t *daddr,
323 					       u32 mark);
324 	int			(*get_saddr)(struct net *net, int oif,
325 					     xfrm_address_t *saddr,
326 					     xfrm_address_t *daddr,
327 					     u32 mark);
328 	int			(*fill_dst)(struct xfrm_dst *xdst,
329 					    struct net_device *dev,
330 					    const struct flowi *fl);
331 	struct dst_entry	*(*blackhole_route)(struct net *net, struct dst_entry *orig);
332 };
333 
334 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family);
335 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo);
336 void km_policy_notify(struct xfrm_policy *xp, int dir,
337 		      const struct km_event *c);
338 void km_state_notify(struct xfrm_state *x, const struct km_event *c);
339 
340 struct xfrm_tmpl;
341 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t,
342 	     struct xfrm_policy *pol);
343 void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
344 int __xfrm_state_delete(struct xfrm_state *x);
345 
346 struct xfrm_state_afinfo {
347 	u8				family;
348 	u8				proto;
349 
350 	const struct xfrm_type_offload *type_offload_esp;
351 
352 	const struct xfrm_type		*type_esp;
353 	const struct xfrm_type		*type_ipip;
354 	const struct xfrm_type		*type_ipip6;
355 	const struct xfrm_type		*type_comp;
356 	const struct xfrm_type		*type_ah;
357 	const struct xfrm_type		*type_routing;
358 	const struct xfrm_type		*type_dstopts;
359 
360 	int			(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
361 	int			(*transport_finish)(struct sk_buff *skb,
362 						    int async);
363 	void			(*local_error)(struct sk_buff *skb, u32 mtu);
364 };
365 
366 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
367 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
368 struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
369 struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family);
370 
371 struct xfrm_input_afinfo {
372 	u8			family;
373 	bool			is_ipip;
374 	int			(*callback)(struct sk_buff *skb, u8 protocol,
375 					    int err);
376 };
377 
378 int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo);
379 int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo);
380 
381 void xfrm_flush_gc(void);
382 void xfrm_state_delete_tunnel(struct xfrm_state *x);
383 
384 struct xfrm_type {
385 	struct module		*owner;
386 	u8			proto;
387 	u8			flags;
388 #define XFRM_TYPE_NON_FRAGMENT	1
389 #define XFRM_TYPE_REPLAY_PROT	2
390 #define XFRM_TYPE_LOCAL_COADDR	4
391 #define XFRM_TYPE_REMOTE_COADDR	8
392 
393 	int			(*init_state)(struct xfrm_state *x);
394 	void			(*destructor)(struct xfrm_state *);
395 	int			(*input)(struct xfrm_state *, struct sk_buff *skb);
396 	int			(*output)(struct xfrm_state *, struct sk_buff *pskb);
397 	int			(*reject)(struct xfrm_state *, struct sk_buff *,
398 					  const struct flowi *);
399 };
400 
401 int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
402 void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
403 
404 struct xfrm_type_offload {
405 	struct module	*owner;
406 	u8		proto;
407 	void		(*encap)(struct xfrm_state *, struct sk_buff *pskb);
408 	int		(*input_tail)(struct xfrm_state *x, struct sk_buff *skb);
409 	int		(*xmit)(struct xfrm_state *, struct sk_buff *pskb, netdev_features_t features);
410 };
411 
412 int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family);
413 void xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family);
414 
415 static inline int xfrm_af2proto(unsigned int family)
416 {
417 	switch(family) {
418 	case AF_INET:
419 		return IPPROTO_IPIP;
420 	case AF_INET6:
421 		return IPPROTO_IPV6;
422 	default:
423 		return 0;
424 	}
425 }
426 
427 static inline const struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto)
428 {
429 	if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
430 	    (ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6))
431 		return &x->inner_mode;
432 	else
433 		return &x->inner_mode_iaf;
434 }
435 
436 struct xfrm_tmpl {
437 /* The id in a template is interpreted as:
438  * daddr - destination of the tunnel; may be zero for transport mode.
439  * spi   - zero to acquire an SPI; non-zero for a static SPI, in which
440  *	   case daddr must be fixed too.
441  * proto - AH/ESP/IPCOMP.  (A filled-in example follows this struct.)
442  */
443 	struct xfrm_id		id;
444 
445 /* Source address of the tunnel. Ignored if it is not a tunnel. */
446 	xfrm_address_t		saddr;
447 
448 	unsigned short		encap_family;
449 
450 	u32			reqid;
451 
452 /* Mode: transport, tunnel etc. */
453 	u8			mode;
454 
455 /* Sharing mode: unique, this session only, this user only etc. */
456 	u8			share;
457 
458 /* May skip this transformation if no SA is found */
459 	u8			optional;
460 
461 /* Skip aalgos/ealgos/calgos checks. */
462 	u8			allalgs;
463 
464 /* Bit mask of algos allowed for acquisition */
465 	u32			aalgos;
466 	u32			ealgos;
467 	u32			calgos;
468 };
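
/*
 * Illustrative sketch (assumed values, not taken from this file): a template
 * requesting tunnel-mode ESP to a fixed peer, acquiring the SPI dynamically
 * and accepting any algorithm.  peer_ip and local_ip are hypothetical
 * placeholders:
 *
 *	struct xfrm_tmpl t = {
 *		.id.proto	= IPPROTO_ESP,
 *		.id.daddr.a4	= peer_ip,		// remote tunnel endpoint
 *		.saddr.a4	= local_ip,		// local tunnel endpoint
 *		.encap_family	= AF_INET,
 *		.reqid		= 1,
 *		.mode		= XFRM_MODE_TUNNEL,
 *		.aalgos		= ~(u32)0,		// allow any auth algorithm
 *		.ealgos		= ~(u32)0,		// allow any encryption algorithm
 *		.calgos		= ~(u32)0,		// allow any compression algorithm
 *	};
 */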
469 
470 #define XFRM_MAX_DEPTH		6
471 #define XFRM_MAX_OFFLOAD_DEPTH	1
472 
473 struct xfrm_policy_walk_entry {
474 	struct list_head	all;
475 	u8			dead;
476 };
477 
478 struct xfrm_policy_walk {
479 	struct xfrm_policy_walk_entry walk;
480 	u8 type;
481 	u32 seq;
482 };
483 
484 struct xfrm_policy_queue {
485 	struct sk_buff_head	hold_queue;
486 	struct timer_list	hold_timer;
487 	unsigned long		timeout;
488 };
489 
490 struct xfrm_policy {
491 	possible_net_t		xp_net;
492 	struct hlist_node	bydst;
493 	struct hlist_node	byidx;
494 
495 	/* This lock protects all elements except for entry. */
496 	rwlock_t		lock;
497 	refcount_t		refcnt;
498 	u32			pos;
499 	struct timer_list	timer;
500 
501 	atomic_t		genid;
502 	u32			priority;
503 	u32			index;
504 	u32			if_id;
505 	struct xfrm_mark	mark;
506 	struct xfrm_selector	selector;
507 	struct xfrm_lifetime_cfg lft;
508 	struct xfrm_lifetime_cur curlft;
509 	struct xfrm_policy_walk_entry walk;
510 	struct xfrm_policy_queue polq;
511 	bool                    bydst_reinsert;
512 	u8			type;
513 	u8			action;
514 	u8			flags;
515 	u8			xfrm_nr;
516 	u16			family;
517 	struct xfrm_sec_ctx	*security;
518 	struct xfrm_tmpl	xfrm_vec[XFRM_MAX_DEPTH];
519 	struct hlist_node	bydst_inexact_list;
520 	struct rcu_head		rcu;
521 };
522 
523 static inline struct net *xp_net(const struct xfrm_policy *xp)
524 {
525 	return read_pnet(&xp->xp_net);
526 }
527 
528 struct xfrm_kmaddress {
529 	xfrm_address_t          local;
530 	xfrm_address_t          remote;
531 	u32			reserved;
532 	u16			family;
533 };
534 
535 struct xfrm_migrate {
536 	xfrm_address_t		old_daddr;
537 	xfrm_address_t		old_saddr;
538 	xfrm_address_t		new_daddr;
539 	xfrm_address_t		new_saddr;
540 	u8			proto;
541 	u8			mode;
542 	u16			reserved;
543 	u32			reqid;
544 	u16			old_family;
545 	u16			new_family;
546 };
547 
548 #define XFRM_KM_TIMEOUT                30
549 /* what happened */
550 #define XFRM_REPLAY_UPDATE	XFRM_AE_CR
551 #define XFRM_REPLAY_TIMEOUT	XFRM_AE_CE
552 
553 /* default aevent timeout in units of 100ms */
554 #define XFRM_AE_ETIME			10
555 /* Async Event timer multiplier */
556 #define XFRM_AE_ETH_M			10
557 /* default seq threshold size */
558 #define XFRM_AE_SEQT_SIZE		2
559 
560 struct xfrm_mgr {
561 	struct list_head	list;
562 	int			(*notify)(struct xfrm_state *x, const struct km_event *c);
563 	int			(*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp);
564 	struct xfrm_policy	*(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
565 	int			(*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
566 	int			(*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c);
567 	int			(*report)(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
568 	int			(*migrate)(const struct xfrm_selector *sel,
569 					   u8 dir, u8 type,
570 					   const struct xfrm_migrate *m,
571 					   int num_bundles,
572 					   const struct xfrm_kmaddress *k,
573 					   const struct xfrm_encap_tmpl *encap);
574 	bool			(*is_alive)(const struct km_event *c);
575 };
576 
577 int xfrm_register_km(struct xfrm_mgr *km);
578 int xfrm_unregister_km(struct xfrm_mgr *km);
579 
580 struct xfrm_tunnel_skb_cb {
581 	union {
582 		struct inet_skb_parm h4;
583 		struct inet6_skb_parm h6;
584 	} header;
585 
586 	union {
587 		struct ip_tunnel *ip4;
588 		struct ip6_tnl *ip6;
589 	} tunnel;
590 };
591 
592 #define XFRM_TUNNEL_SKB_CB(__skb) ((struct xfrm_tunnel_skb_cb *)&((__skb)->cb[0]))
593 
594 /*
595  * This structure is used only while packets are being transformed by
596  * IPsec.  As soon as the packet leaves IPsec, the area beyond the
597  * generic IP part may be overwritten.
598  */
599 struct xfrm_skb_cb {
600 	struct xfrm_tunnel_skb_cb header;
601 
602 	/* Sequence number for replay protection. */
603 	union {
604 		struct {
605 			__u32 low;
606 			__u32 hi;
607 		} output;
608 		struct {
609 			__be32 low;
610 			__be32 hi;
611 		} input;
612 	} seq;
613 };
614 
615 #define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0]))
616 
617 /*
618  * This structure is used by the afinfo prepare_input/prepare_output functions
619  * to transmit header information to the mode input/output functions.
620  */
621 struct xfrm_mode_skb_cb {
622 	struct xfrm_tunnel_skb_cb header;
623 
624 	/* Copied from the header for IPv4; always zero (id) and DF (frag_off) for IPv6. */
625 	__be16 id;
626 	__be16 frag_off;
627 
628 	/* IP header length (excluding options or extension headers). */
629 	u8 ihl;
630 
631 	/* TOS for IPv4, class for IPv6. */
632 	u8 tos;
633 
634 	/* TTL for IPv4, hop limit for IPv6. */
635 	u8 ttl;
636 
637 	/* Protocol for IPv4, NH for IPv6. */
638 	u8 protocol;
639 
640 	/* Option length for IPv4, zero for IPv6. */
641 	u8 optlen;
642 
643 	/* Used by IPv6 only, zero for IPv4. */
644 	u8 flow_lbl[3];
645 };
646 
647 #define XFRM_MODE_SKB_CB(__skb) ((struct xfrm_mode_skb_cb *)&((__skb)->cb[0]))
648 
649 /*
650  * This structure is used by the input processing to locate the SPI and
651  * related information.
652  */
653 struct xfrm_spi_skb_cb {
654 	struct xfrm_tunnel_skb_cb header;
655 
656 	unsigned int daddroff;
657 	unsigned int family;
658 	__be32 seq;
659 };
660 
661 #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
662 
663 #ifdef CONFIG_AUDITSYSCALL
664 static inline struct audit_buffer *xfrm_audit_start(const char *op)
665 {
666 	struct audit_buffer *audit_buf = NULL;
667 
668 	if (audit_enabled == AUDIT_OFF)
669 		return NULL;
670 	audit_buf = audit_log_start(audit_context(), GFP_ATOMIC,
671 				    AUDIT_MAC_IPSEC_EVENT);
672 	if (audit_buf == NULL)
673 		return NULL;
674 	audit_log_format(audit_buf, "op=%s", op);
675 	return audit_buf;
676 }
677 
678 static inline void xfrm_audit_helper_usrinfo(bool task_valid,
679 					     struct audit_buffer *audit_buf)
680 {
681 	const unsigned int auid = from_kuid(&init_user_ns, task_valid ?
682 					    audit_get_loginuid(current) :
683 					    INVALID_UID);
684 	const unsigned int ses = task_valid ? audit_get_sessionid(current) :
685 		AUDIT_SID_UNSET;
686 
687 	audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses);
688 	audit_log_task_context(audit_buf);
689 }
690 
691 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid);
692 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
693 			      bool task_valid);
694 void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid);
695 void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid);
696 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
697 				      struct sk_buff *skb);
698 void xfrm_audit_state_replay(struct xfrm_state *x, struct sk_buff *skb,
699 			     __be32 net_seq);
700 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
701 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, __be32 net_spi,
702 			       __be32 net_seq);
703 void xfrm_audit_state_icvfail(struct xfrm_state *x, struct sk_buff *skb,
704 			      u8 proto);
705 #else
706 
707 static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
708 					 bool task_valid)
709 {
710 }
711 
712 static inline void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
713 					    bool task_valid)
714 {
715 }
716 
717 static inline void xfrm_audit_state_add(struct xfrm_state *x, int result,
718 					bool task_valid)
719 {
720 }
721 
722 static inline void xfrm_audit_state_delete(struct xfrm_state *x, int result,
723 					   bool task_valid)
724 {
725 }
726 
727 static inline void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
728 					     struct sk_buff *skb)
729 {
730 }
731 
732 static inline void xfrm_audit_state_replay(struct xfrm_state *x,
733 					   struct sk_buff *skb, __be32 net_seq)
734 {
735 }
736 
737 static inline void xfrm_audit_state_notfound_simple(struct sk_buff *skb,
738 				      u16 family)
739 {
740 }
741 
742 static inline void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
743 				      __be32 net_spi, __be32 net_seq)
744 {
745 }
746 
747 static inline void xfrm_audit_state_icvfail(struct xfrm_state *x,
748 				     struct sk_buff *skb, u8 proto)
749 {
750 }
751 #endif /* CONFIG_AUDITSYSCALL */
752 
753 static inline void xfrm_pol_hold(struct xfrm_policy *policy)
754 {
755 	if (likely(policy != NULL))
756 		refcount_inc(&policy->refcnt);
757 }
758 
759 void xfrm_policy_destroy(struct xfrm_policy *policy);
760 
761 static inline void xfrm_pol_put(struct xfrm_policy *policy)
762 {
763 	if (refcount_dec_and_test(&policy->refcnt))
764 		xfrm_policy_destroy(policy);
765 }
766 
767 static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
768 {
769 	int i;
770 	for (i = npols - 1; i >= 0; --i)
771 		xfrm_pol_put(pols[i]);
772 }
773 
774 void __xfrm_state_destroy(struct xfrm_state *, bool);
775 
776 static inline void __xfrm_state_put(struct xfrm_state *x)
777 {
778 	refcount_dec(&x->refcnt);
779 }
780 
781 static inline void xfrm_state_put(struct xfrm_state *x)
782 {
783 	if (refcount_dec_and_test(&x->refcnt))
784 		__xfrm_state_destroy(x, false);
785 }
786 
787 static inline void xfrm_state_put_sync(struct xfrm_state *x)
788 {
789 	if (refcount_dec_and_test(&x->refcnt))
790 		__xfrm_state_destroy(x, true);
791 }
792 
793 static inline void xfrm_state_hold(struct xfrm_state *x)
794 {
795 	refcount_inc(&x->refcnt);
796 }
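
/*
 * Illustrative sketch (assumed call site, not defined here): typical
 * reference handling around a state lookup.  Lookup helpers such as
 * xfrm_state_lookup() (declared further below) return a held state, so the
 * caller drops its reference with xfrm_state_put() when done:
 *
 *	struct xfrm_state *x;
 *
 *	x = xfrm_state_lookup(net, mark, daddr, spi, proto, family);
 *	if (x) {
 *		// ... use the state ...
 *		xfrm_state_put(x);
 *	}
 */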
797 
798 static inline bool addr_match(const void *token1, const void *token2,
799 			      unsigned int prefixlen)
800 {
801 	const __be32 *a1 = token1;
802 	const __be32 *a2 = token2;
803 	unsigned int pdw;
804 	unsigned int pbi;
805 
806 	pdw = prefixlen >> 5;	  /* num of whole u32 in prefix */
807 	pbi = prefixlen &  0x1f;  /* num of bits in incomplete u32 in prefix */
808 
809 	if (pdw)
810 		if (memcmp(a1, a2, pdw << 2))
811 			return false;
812 
813 	if (pbi) {
814 		__be32 mask;
815 
816 		mask = htonl((0xffffffff) << (32 - pbi));
817 
818 		if ((a1[pdw] ^ a2[pdw]) & mask)
819 			return false;
820 	}
821 
822 	return true;
823 }
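
/*
 * Usage sketch (assumed call site, in the style of selector matching):
 * check whether daddr falls within a selector's destination prefix, e.g.
 * an IPv6 selector with prefixlen_d == 64:
 *
 *	bool hit = addr_match(&sel->daddr, daddr, sel->prefixlen_d);
 */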
824 
825 static inline bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen)
826 {
827 	/* C99 6.5.7 (3): u32 << 32 is undefined behaviour */
828 	if (sizeof(long) == 4 && prefixlen == 0)
829 		return true;
830 	return !((a1 ^ a2) & htonl(~0UL << (32 - prefixlen)));
831 }
832 
833 static __inline__
834 __be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli)
835 {
836 	__be16 port;
837 	switch(fl->flowi_proto) {
838 	case IPPROTO_TCP:
839 	case IPPROTO_UDP:
840 	case IPPROTO_UDPLITE:
841 	case IPPROTO_SCTP:
842 		port = uli->ports.sport;
843 		break;
844 	case IPPROTO_ICMP:
845 	case IPPROTO_ICMPV6:
846 		port = htons(uli->icmpt.type);
847 		break;
848 	case IPPROTO_MH:
849 		port = htons(uli->mht.type);
850 		break;
851 	case IPPROTO_GRE:
852 		port = htons(ntohl(uli->gre_key) >> 16);
853 		break;
854 	default:
855 		port = 0;	/*XXX*/
856 	}
857 	return port;
858 }
859 
860 static __inline__
861 __be16 xfrm_flowi_dport(const struct flowi *fl, const union flowi_uli *uli)
862 {
863 	__be16 port;
864 	switch(fl->flowi_proto) {
865 	case IPPROTO_TCP:
866 	case IPPROTO_UDP:
867 	case IPPROTO_UDPLITE:
868 	case IPPROTO_SCTP:
869 		port = uli->ports.dport;
870 		break;
871 	case IPPROTO_ICMP:
872 	case IPPROTO_ICMPV6:
873 		port = htons(uli->icmpt.code);
874 		break;
875 	case IPPROTO_GRE:
876 		port = htons(ntohl(uli->gre_key) & 0xffff);
877 		break;
878 	default:
879 		port = 0;	/*XXX*/
880 	}
881 	return port;
882 }
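
/*
 * Note on the mappings above: protocols without real ports fold their ULI
 * into the "port" fields so that selectors can still match on it.  Effective
 * encoding for two assumed examples:
 *
 *	ICMP echo request:   sport = htons(ICMP_ECHO), dport = htons(0)
 *	GRE with key K:      sport = htons(K >> 16),   dport = htons(K & 0xffff)
 */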
883 
884 bool xfrm_selector_match(const struct xfrm_selector *sel,
885 			 const struct flowi *fl, unsigned short family);
886 
887 #ifdef CONFIG_SECURITY_NETWORK_XFRM
888 /*	If neither has a context --> match.
889  *	Otherwise, both must have a context and the sid, doi and alg must match.
890  */
891 static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
892 {
893 	return ((!s1 && !s2) ||
894 		(s1 && s2 &&
895 		 (s1->ctx_sid == s2->ctx_sid) &&
896 		 (s1->ctx_doi == s2->ctx_doi) &&
897 		 (s1->ctx_alg == s2->ctx_alg)));
898 }
899 #else
900 static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
901 {
902 	return true;
903 }
904 #endif
905 
906 /* A struct encoding a bundle of transformations to apply to some flow.
907  *
908  * xdst->child points to the next element of the bundle.
909  * dst->xfrm   points to an instance of a transformer.
910  *
911  * Due to unfortunate limitations of the current routing cache, which we
912  * have no time to fix, it mirrors struct rtable and is bound to the same
913  * routing key, including saddr and daddr.  However, we can have many
914  * bundles differing by session id.  All the bundles grow from a parent
915  * policy rule.
916  */
917 struct xfrm_dst {
918 	union {
919 		struct dst_entry	dst;
920 		struct rtable		rt;
921 		struct rt6_info		rt6;
922 	} u;
923 	struct dst_entry *route;
924 	struct dst_entry *child;
925 	struct dst_entry *path;
926 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
927 	int num_pols, num_xfrms;
928 	u32 xfrm_genid;
929 	u32 policy_genid;
930 	u32 route_mtu_cached;
931 	u32 child_mtu_cached;
932 	u32 route_cookie;
933 	u32 path_cookie;
934 };
935 
936 static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
937 {
938 #ifdef CONFIG_XFRM
939 	if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
940 		const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst;
941 
942 		return xdst->path;
943 	}
944 #endif
945 	return (struct dst_entry *) dst;
946 }
947 
948 static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
949 {
950 #ifdef CONFIG_XFRM
951 	if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
952 		struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
953 		return xdst->child;
954 	}
955 #endif
956 	return NULL;
957 }
958 
959 #ifdef CONFIG_XFRM
960 static inline void xfrm_dst_set_child(struct xfrm_dst *xdst, struct dst_entry *child)
961 {
962 	xdst->child = child;
963 }
964 
965 static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
966 {
967 	xfrm_pols_put(xdst->pols, xdst->num_pols);
968 	dst_release(xdst->route);
969 	if (likely(xdst->u.dst.xfrm))
970 		xfrm_state_put(xdst->u.dst.xfrm);
971 }
972 #endif
973 
974 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
975 
976 struct xfrm_if_parms {
977 	int link;		/* ifindex of underlying L2 interface */
978 	u32 if_id;		/* interface identifier */
979 };
980 
981 struct xfrm_if {
982 	struct xfrm_if __rcu *next;	/* next interface in list */
983 	struct net_device *dev;		/* virtual device associated with interface */
984 	struct net *net;		/* netns for packet i/o */
985 	struct xfrm_if_parms p;		/* interface parms */
986 
987 	struct gro_cells gro_cells;
988 };
989 
990 struct xfrm_offload {
991 	/* Output sequence number for replay protection on offloading. */
992 	struct {
993 		__u32 low;
994 		__u32 hi;
995 	} seq;
996 
997 	__u32			flags;
998 #define	SA_DELETE_REQ		1
999 #define	CRYPTO_DONE		2
1000 #define	CRYPTO_NEXT_DONE	4
1001 #define	CRYPTO_FALLBACK		8
1002 #define	XFRM_GSO_SEGMENT	16
1003 #define	XFRM_GRO		32
1004 #define	XFRM_ESP_NO_TRAILER	64
1005 #define	XFRM_DEV_RESUME		128
1006 #define	XFRM_XMIT		256
1007 
1008 	__u32			status;
1009 #define CRYPTO_SUCCESS				1
1010 #define CRYPTO_GENERIC_ERROR			2
1011 #define CRYPTO_TRANSPORT_AH_AUTH_FAILED		4
1012 #define CRYPTO_TRANSPORT_ESP_AUTH_FAILED	8
1013 #define CRYPTO_TUNNEL_AH_AUTH_FAILED		16
1014 #define CRYPTO_TUNNEL_ESP_AUTH_FAILED		32
1015 #define CRYPTO_INVALID_PACKET_SYNTAX		64
1016 #define CRYPTO_INVALID_PROTOCOL			128
1017 
1018 	__u8			proto;
1019 	__u8			inner_ipproto;
1020 };
1021 
1022 struct sec_path {
1023 	int			len;
1024 	int			olen;
1025 
1026 	struct xfrm_state	*xvec[XFRM_MAX_DEPTH];
1027 	struct xfrm_offload	ovec[XFRM_MAX_OFFLOAD_DEPTH];
1028 };
1029 
1030 struct sec_path *secpath_set(struct sk_buff *skb);
1031 
1032 static inline void
1033 secpath_reset(struct sk_buff *skb)
1034 {
1035 #ifdef CONFIG_XFRM
1036 	skb_ext_del(skb, SKB_EXT_SEC_PATH);
1037 #endif
1038 }
1039 
1040 static inline int
1041 xfrm_addr_any(const xfrm_address_t *addr, unsigned short family)
1042 {
1043 	switch (family) {
1044 	case AF_INET:
1045 		return addr->a4 == 0;
1046 	case AF_INET6:
1047 		return ipv6_addr_any(&addr->in6);
1048 	}
1049 	return 0;
1050 }
1051 
1052 static inline int
1053 __xfrm4_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
1054 {
1055 	return	(tmpl->saddr.a4 &&
1056 		 tmpl->saddr.a4 != x->props.saddr.a4);
1057 }
1058 
1059 static inline int
1060 __xfrm6_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
1061 {
1062 	return	(!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
1063 		 !ipv6_addr_equal((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
1064 }
1065 
1066 static inline int
1067 xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, unsigned short family)
1068 {
1069 	switch (family) {
1070 	case AF_INET:
1071 		return __xfrm4_state_addr_cmp(tmpl, x);
1072 	case AF_INET6:
1073 		return __xfrm6_state_addr_cmp(tmpl, x);
1074 	}
1075 	return !0;
1076 }
1077 
1078 #ifdef CONFIG_XFRM
1079 static inline bool
1080 xfrm_default_allow(struct net *net, int dir)
1081 {
1082 	u8 def = net->xfrm.policy_default;
1083 
1084 	switch (dir) {
1085 	case XFRM_POLICY_IN:
1086 		return def & XFRM_POL_DEFAULT_IN ? false : true;
1087 	case XFRM_POLICY_OUT:
1088 		return def & XFRM_POL_DEFAULT_OUT ? false : true;
1089 	case XFRM_POLICY_FWD:
1090 		return def & XFRM_POL_DEFAULT_FWD ? false : true;
1091 	}
1092 	return false;
1093 }
1094 
1095 int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
1096 			unsigned short family);
1097 
1098 static inline int __xfrm_policy_check2(struct sock *sk, int dir,
1099 				       struct sk_buff *skb,
1100 				       unsigned int family, int reverse)
1101 {
1102 	struct net *net = dev_net(skb->dev);
1103 	int ndir = dir | (reverse ? XFRM_POLICY_MASK + 1 : 0);
1104 
1105 	if (sk && sk->sk_policy[XFRM_POLICY_IN])
1106 		return __xfrm_policy_check(sk, ndir, skb, family);
1107 
1108 	if (xfrm_default_allow(net, dir))
1109 		return (!net->xfrm.policy_count[dir] && !secpath_exists(skb)) ||
1110 		       (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) ||
1111 		       __xfrm_policy_check(sk, ndir, skb, family);
1112 	else
1113 		return (skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY)) ||
1114 		       __xfrm_policy_check(sk, ndir, skb, family);
1115 }
1116 
1117 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1118 {
1119 	return __xfrm_policy_check2(sk, dir, skb, family, 0);
1120 }
1121 
1122 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1123 {
1124 	return xfrm_policy_check(sk, dir, skb, AF_INET);
1125 }
1126 
1127 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1128 {
1129 	return xfrm_policy_check(sk, dir, skb, AF_INET6);
1130 }
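
/*
 * Illustrative sketch (assumed receive-path call site): a protocol input
 * routine drops packets that fail inbound policy before delivering them:
 *
 *	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
 *		kfree_skb(skb);
 *		return NET_RX_DROP;
 *	}
 */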
1131 
1132 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1133 					     struct sk_buff *skb)
1134 {
1135 	return __xfrm_policy_check2(sk, dir, skb, AF_INET, 1);
1136 }
1137 
1138 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1139 					     struct sk_buff *skb)
1140 {
1141 	return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
1142 }
1143 
1144 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
1145 			  unsigned int family, int reverse);
1146 
1147 static inline int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
1148 				      unsigned int family)
1149 {
1150 	return __xfrm_decode_session(skb, fl, family, 0);
1151 }
1152 
1153 static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
1154 					      struct flowi *fl,
1155 					      unsigned int family)
1156 {
1157 	return __xfrm_decode_session(skb, fl, family, 1);
1158 }
1159 
1160 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
1161 
1162 static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
1163 {
1164 	struct net *net = dev_net(skb->dev);
1165 
1166 	if (xfrm_default_allow(net, XFRM_POLICY_FWD))
1167 		return !net->xfrm.policy_count[XFRM_POLICY_OUT] ||
1168 			(skb_dst(skb)->flags & DST_NOXFRM) ||
1169 			__xfrm_route_forward(skb, family);
1170 	else
1171 		return (skb_dst(skb)->flags & DST_NOXFRM) ||
1172 			__xfrm_route_forward(skb, family);
1173 }
1174 
1175 static inline int xfrm4_route_forward(struct sk_buff *skb)
1176 {
1177 	return xfrm_route_forward(skb, AF_INET);
1178 }
1179 
1180 static inline int xfrm6_route_forward(struct sk_buff *skb)
1181 {
1182 	return xfrm_route_forward(skb, AF_INET6);
1183 }
1184 
1185 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk);
1186 
1187 static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
1188 {
1189 	sk->sk_policy[0] = NULL;
1190 	sk->sk_policy[1] = NULL;
1191 	if (unlikely(osk->sk_policy[0] || osk->sk_policy[1]))
1192 		return __xfrm_sk_clone_policy(sk, osk);
1193 	return 0;
1194 }
1195 
1196 int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
1197 
1198 static inline void xfrm_sk_free_policy(struct sock *sk)
1199 {
1200 	struct xfrm_policy *pol;
1201 
1202 	pol = rcu_dereference_protected(sk->sk_policy[0], 1);
1203 	if (unlikely(pol != NULL)) {
1204 		xfrm_policy_delete(pol, XFRM_POLICY_MAX);
1205 		sk->sk_policy[0] = NULL;
1206 	}
1207 	pol = rcu_dereference_protected(sk->sk_policy[1], 1);
1208 	if (unlikely(pol != NULL)) {
1209 		xfrm_policy_delete(pol, XFRM_POLICY_MAX+1);
1210 		sk->sk_policy[1] = NULL;
1211 	}
1212 }
1213 
1214 #else
1215 
1216 static inline void xfrm_sk_free_policy(struct sock *sk) {}
1217 static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) { return 0; }
1218 static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
1219 static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
1220 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1221 {
1222 	return 1;
1223 }
1224 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1225 {
1226 	return 1;
1227 }
1228 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1229 {
1230 	return 1;
1231 }
1232 static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
1233 					      struct flowi *fl,
1234 					      unsigned int family)
1235 {
1236 	return -ENOSYS;
1237 }
1238 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1239 					     struct sk_buff *skb)
1240 {
1241 	return 1;
1242 }
1243 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1244 					     struct sk_buff *skb)
1245 {
1246 	return 1;
1247 }
1248 #endif
1249 
1250 static __inline__
1251 xfrm_address_t *xfrm_flowi_daddr(const struct flowi *fl, unsigned short family)
1252 {
1253 	switch (family){
1254 	case AF_INET:
1255 		return (xfrm_address_t *)&fl->u.ip4.daddr;
1256 	case AF_INET6:
1257 		return (xfrm_address_t *)&fl->u.ip6.daddr;
1258 	}
1259 	return NULL;
1260 }
1261 
1262 static __inline__
1263 xfrm_address_t *xfrm_flowi_saddr(const struct flowi *fl, unsigned short family)
1264 {
1265 	switch (family){
1266 	case AF_INET:
1267 		return (xfrm_address_t *)&fl->u.ip4.saddr;
1268 	case AF_INET6:
1269 		return (xfrm_address_t *)&fl->u.ip6.saddr;
1270 	}
1271 	return NULL;
1272 }
1273 
1274 static __inline__
1275 void xfrm_flowi_addr_get(const struct flowi *fl,
1276 			 xfrm_address_t *saddr, xfrm_address_t *daddr,
1277 			 unsigned short family)
1278 {
1279 	switch(family) {
1280 	case AF_INET:
1281 		memcpy(&saddr->a4, &fl->u.ip4.saddr, sizeof(saddr->a4));
1282 		memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
1283 		break;
1284 	case AF_INET6:
1285 		saddr->in6 = fl->u.ip6.saddr;
1286 		daddr->in6 = fl->u.ip6.daddr;
1287 		break;
1288 	}
1289 }
1290 
1291 static __inline__ int
1292 __xfrm4_state_addr_check(const struct xfrm_state *x,
1293 			 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
1294 {
1295 	if (daddr->a4 == x->id.daddr.a4 &&
1296 	    (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
1297 		return 1;
1298 	return 0;
1299 }
1300 
1301 static __inline__ int
1302 __xfrm6_state_addr_check(const struct xfrm_state *x,
1303 			 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
1304 {
1305 	if (ipv6_addr_equal((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
1306 	    (ipv6_addr_equal((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr) ||
1307 	     ipv6_addr_any((struct in6_addr *)saddr) ||
1308 	     ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
1309 		return 1;
1310 	return 0;
1311 }
1312 
1313 static __inline__ int
1314 xfrm_state_addr_check(const struct xfrm_state *x,
1315 		      const xfrm_address_t *daddr, const xfrm_address_t *saddr,
1316 		      unsigned short family)
1317 {
1318 	switch (family) {
1319 	case AF_INET:
1320 		return __xfrm4_state_addr_check(x, daddr, saddr);
1321 	case AF_INET6:
1322 		return __xfrm6_state_addr_check(x, daddr, saddr);
1323 	}
1324 	return 0;
1325 }
1326 
1327 static __inline__ int
1328 xfrm_state_addr_flow_check(const struct xfrm_state *x, const struct flowi *fl,
1329 			   unsigned short family)
1330 {
1331 	switch (family) {
1332 	case AF_INET:
1333 		return __xfrm4_state_addr_check(x,
1334 						(const xfrm_address_t *)&fl->u.ip4.daddr,
1335 						(const xfrm_address_t *)&fl->u.ip4.saddr);
1336 	case AF_INET6:
1337 		return __xfrm6_state_addr_check(x,
1338 						(const xfrm_address_t *)&fl->u.ip6.daddr,
1339 						(const xfrm_address_t *)&fl->u.ip6.saddr);
1340 	}
1341 	return 0;
1342 }
1343 
1344 static inline int xfrm_state_kern(const struct xfrm_state *x)
1345 {
1346 	return atomic_read(&x->tunnel_users);
1347 }
1348 
1349 static inline bool xfrm_id_proto_valid(u8 proto)
1350 {
1351 	switch (proto) {
1352 	case IPPROTO_AH:
1353 	case IPPROTO_ESP:
1354 	case IPPROTO_COMP:
1355 #if IS_ENABLED(CONFIG_IPV6)
1356 	case IPPROTO_ROUTING:
1357 	case IPPROTO_DSTOPTS:
1358 #endif
1359 		return true;
1360 	default:
1361 		return false;
1362 	}
1363 }
1364 
1365 /* IPSEC_PROTO_ANY matches only the 3 IPsec protocols; 0 matches all. */
1366 static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
1367 {
1368 	return (!userproto || proto == userproto ||
1369 		(userproto == IPSEC_PROTO_ANY && (proto == IPPROTO_AH ||
1370 						  proto == IPPROTO_ESP ||
1371 						  proto == IPPROTO_COMP)));
1372 }
1373 
1374 /*
1375  * xfrm algorithm information
1376  */
1377 struct xfrm_algo_aead_info {
1378 	char *geniv;
1379 	u16 icv_truncbits;
1380 };
1381 
1382 struct xfrm_algo_auth_info {
1383 	u16 icv_truncbits;
1384 	u16 icv_fullbits;
1385 };
1386 
1387 struct xfrm_algo_encr_info {
1388 	char *geniv;
1389 	u16 blockbits;
1390 	u16 defkeybits;
1391 };
1392 
1393 struct xfrm_algo_comp_info {
1394 	u16 threshold;
1395 };
1396 
1397 struct xfrm_algo_desc {
1398 	char *name;
1399 	char *compat;
1400 	u8 available:1;
1401 	u8 pfkey_supported:1;
1402 	union {
1403 		struct xfrm_algo_aead_info aead;
1404 		struct xfrm_algo_auth_info auth;
1405 		struct xfrm_algo_encr_info encr;
1406 		struct xfrm_algo_comp_info comp;
1407 	} uinfo;
1408 	struct sadb_alg desc;
1409 };
1410 
1411 /* XFRM protocol handlers.  */
1412 struct xfrm4_protocol {
1413 	int (*handler)(struct sk_buff *skb);
1414 	int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
1415 			     int encap_type);
1416 	int (*cb_handler)(struct sk_buff *skb, int err);
1417 	int (*err_handler)(struct sk_buff *skb, u32 info);
1418 
1419 	struct xfrm4_protocol __rcu *next;
1420 	int priority;
1421 };
1422 
1423 struct xfrm6_protocol {
1424 	int (*handler)(struct sk_buff *skb);
1425 	int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
1426 			     int encap_type);
1427 	int (*cb_handler)(struct sk_buff *skb, int err);
1428 	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
1429 			   u8 type, u8 code, int offset, __be32 info);
1430 
1431 	struct xfrm6_protocol __rcu *next;
1432 	int priority;
1433 };
1434 
1435 /* XFRM tunnel handlers.  */
1436 struct xfrm_tunnel {
1437 	int (*handler)(struct sk_buff *skb);
1438 	int (*cb_handler)(struct sk_buff *skb, int err);
1439 	int (*err_handler)(struct sk_buff *skb, u32 info);
1440 
1441 	struct xfrm_tunnel __rcu *next;
1442 	int priority;
1443 };
1444 
1445 struct xfrm6_tunnel {
1446 	int (*handler)(struct sk_buff *skb);
1447 	int (*cb_handler)(struct sk_buff *skb, int err);
1448 	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
1449 			   u8 type, u8 code, int offset, __be32 info);
1450 	struct xfrm6_tunnel __rcu *next;
1451 	int priority;
1452 };
1453 
1454 void xfrm_init(void);
1455 void xfrm4_init(void);
1456 int xfrm_state_init(struct net *net);
1457 void xfrm_state_fini(struct net *net);
1458 void xfrm4_state_init(void);
1459 void xfrm4_protocol_init(void);
1460 #ifdef CONFIG_XFRM
1461 int xfrm6_init(void);
1462 void xfrm6_fini(void);
1463 int xfrm6_state_init(void);
1464 void xfrm6_state_fini(void);
1465 int xfrm6_protocol_init(void);
1466 void xfrm6_protocol_fini(void);
1467 #else
1468 static inline int xfrm6_init(void)
1469 {
1470 	return 0;
1471 }
1472 static inline void xfrm6_fini(void)
1473 {
1474 	;
1475 }
1476 #endif
1477 
1478 #ifdef CONFIG_XFRM_STATISTICS
1479 int xfrm_proc_init(struct net *net);
1480 void xfrm_proc_fini(struct net *net);
1481 #endif
1482 
1483 int xfrm_sysctl_init(struct net *net);
1484 #ifdef CONFIG_SYSCTL
1485 void xfrm_sysctl_fini(struct net *net);
1486 #else
1487 static inline void xfrm_sysctl_fini(struct net *net)
1488 {
1489 }
1490 #endif
1491 
1492 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
1493 			  struct xfrm_address_filter *filter);
1494 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
1495 		    int (*func)(struct xfrm_state *, int, void*), void *);
1496 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
1497 struct xfrm_state *xfrm_state_alloc(struct net *net);
1498 void xfrm_state_free(struct xfrm_state *x);
1499 struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
1500 				   const xfrm_address_t *saddr,
1501 				   const struct flowi *fl,
1502 				   struct xfrm_tmpl *tmpl,
1503 				   struct xfrm_policy *pol, int *err,
1504 				   unsigned short family, u32 if_id);
1505 struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
1506 				       xfrm_address_t *daddr,
1507 				       xfrm_address_t *saddr,
1508 				       unsigned short family,
1509 				       u8 mode, u8 proto, u32 reqid);
1510 struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
1511 					      unsigned short family);
1512 int xfrm_state_check_expire(struct xfrm_state *x);
1513 void xfrm_state_insert(struct xfrm_state *x);
1514 int xfrm_state_add(struct xfrm_state *x);
1515 int xfrm_state_update(struct xfrm_state *x);
1516 struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
1517 				     const xfrm_address_t *daddr, __be32 spi,
1518 				     u8 proto, unsigned short family);
1519 struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
1520 					    const xfrm_address_t *daddr,
1521 					    const xfrm_address_t *saddr,
1522 					    u8 proto,
1523 					    unsigned short family);
1524 #ifdef CONFIG_XFRM_SUB_POLICY
1525 void xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1526 		    unsigned short family);
1527 void xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1528 		     unsigned short family);
1529 #else
1530 static inline void xfrm_tmpl_sort(struct xfrm_tmpl **d, struct xfrm_tmpl **s,
1531 				  int n, unsigned short family)
1532 {
1533 }
1534 
1535 static inline void xfrm_state_sort(struct xfrm_state **d, struct xfrm_state **s,
1536 				   int n, unsigned short family)
1537 {
1538 }
1539 #endif
1540 
1541 struct xfrmk_sadinfo {
1542 	u32 sadhcnt; /* current hash bkts */
1543 	u32 sadhmcnt; /* max allowed hash bkts */
1544 	u32 sadcnt; /* current running count */
1545 };
1546 
1547 struct xfrmk_spdinfo {
1548 	u32 incnt;
1549 	u32 outcnt;
1550 	u32 fwdcnt;
1551 	u32 inscnt;
1552 	u32 outscnt;
1553 	u32 fwdscnt;
1554 	u32 spdhcnt;
1555 	u32 spdhmcnt;
1556 };
1557 
1558 struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
1559 int xfrm_state_delete(struct xfrm_state *x);
1560 int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
1561 int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
1562 void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
1563 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
1564 u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
1565 int xfrm_init_replay(struct xfrm_state *x);
1566 u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu);
1567 u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
1568 int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload);
1569 int xfrm_init_state(struct xfrm_state *x);
1570 int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
1571 int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
1572 int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
1573 			 int (*finish)(struct net *, struct sock *,
1574 				       struct sk_buff *));
1575 int xfrm_trans_queue(struct sk_buff *skb,
1576 		     int (*finish)(struct net *, struct sock *,
1577 				   struct sk_buff *));
1578 int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err);
1579 int xfrm_output(struct sock *sk, struct sk_buff *skb);
1580 
1581 #if IS_ENABLED(CONFIG_NET_PKTGEN)
1582 int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb);
1583 #endif
1584 
1585 void xfrm_local_error(struct sk_buff *skb, int mtu);
1586 int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1587 int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
1588 		    int encap_type);
1589 int xfrm4_transport_finish(struct sk_buff *skb, int async);
1590 int xfrm4_rcv(struct sk_buff *skb);
1591 
1592 static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
1593 {
1594 	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
1595 	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
1596 	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
1597 	return xfrm_input(skb, nexthdr, spi, 0);
1598 }
1599 
1600 int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb);
1601 int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
1602 int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol);
1603 int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
1604 int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
1605 void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
1606 int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1607 int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
1608 		  struct ip6_tnl *t);
1609 int xfrm6_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
1610 		    int encap_type);
1611 int xfrm6_transport_finish(struct sk_buff *skb, int async);
1612 int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t);
1613 int xfrm6_rcv(struct sk_buff *skb);
1614 int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
1615 		     xfrm_address_t *saddr, u8 proto);
1616 void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
1617 int xfrm6_protocol_register(struct xfrm6_protocol *handler, unsigned char protocol);
1618 int xfrm6_protocol_deregister(struct xfrm6_protocol *handler, unsigned char protocol);
1619 int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
1620 int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
1621 __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
1622 __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
1623 int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
1624 
1625 #ifdef CONFIG_XFRM
1626 void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu);
1627 int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
1628 int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
1629 int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval,
1630 		     int optlen);
1631 #else
1632 static inline int xfrm_user_policy(struct sock *sk, int optname,
1633 				   sockptr_t optval, int optlen)
1634 {
1635 	return -ENOPROTOOPT;
1636 }
1637 #endif
1638 
1639 struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
1640 				    const xfrm_address_t *saddr,
1641 				    const xfrm_address_t *daddr,
1642 				    int family, u32 mark);
1643 
1644 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);
1645 
1646 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
1647 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1648 		     int (*func)(struct xfrm_policy *, int, int, void*),
1649 		     void *);
1650 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
1651 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
1652 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net,
1653 					  const struct xfrm_mark *mark,
1654 					  u32 if_id, u8 type, int dir,
1655 					  struct xfrm_selector *sel,
1656 					  struct xfrm_sec_ctx *ctx, int delete,
1657 					  int *err);
1658 struct xfrm_policy *xfrm_policy_byid(struct net *net,
1659 				     const struct xfrm_mark *mark, u32 if_id,
1660 				     u8 type, int dir, u32 id, int delete,
1661 				     int *err);
1662 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
1663 void xfrm_policy_hash_rebuild(struct net *net);
1664 u32 xfrm_get_acqseq(void);
1665 int verify_spi_info(u8 proto, u32 min, u32 max);
1666 int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
1667 struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
1668 				 u8 mode, u32 reqid, u32 if_id, u8 proto,
1669 				 const xfrm_address_t *daddr,
1670 				 const xfrm_address_t *saddr, int create,
1671 				 unsigned short family);
1672 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
1673 
1674 #ifdef CONFIG_XFRM_MIGRATE
1675 int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1676 	       const struct xfrm_migrate *m, int num_bundles,
1677 	       const struct xfrm_kmaddress *k,
1678 	       const struct xfrm_encap_tmpl *encap);
1679 struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net);
1680 struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
1681 				      struct xfrm_migrate *m,
1682 				      struct xfrm_encap_tmpl *encap);
1683 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1684 		 struct xfrm_migrate *m, int num_bundles,
1685 		 struct xfrm_kmaddress *k, struct net *net,
1686 		 struct xfrm_encap_tmpl *encap);
1687 #endif
1688 
1689 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
1690 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid);
1691 int km_report(struct net *net, u8 proto, struct xfrm_selector *sel,
1692 	      xfrm_address_t *addr);
1693 
1694 void xfrm_input_init(void);
1695 int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
1696 
1697 void xfrm_probe_algs(void);
1698 int xfrm_count_pfkey_auth_supported(void);
1699 int xfrm_count_pfkey_enc_supported(void);
1700 struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
1701 struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
1702 struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
1703 struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
1704 struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
1705 struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
1706 struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
1707 struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
1708 struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
1709 					    int probe);
1710 
1711 static inline bool xfrm6_addr_equal(const xfrm_address_t *a,
1712 				    const xfrm_address_t *b)
1713 {
1714 	return ipv6_addr_equal((const struct in6_addr *)a,
1715 			       (const struct in6_addr *)b);
1716 }
1717 
1718 static inline bool xfrm_addr_equal(const xfrm_address_t *a,
1719 				   const xfrm_address_t *b,
1720 				   sa_family_t family)
1721 {
1722 	switch (family) {
1723 	default:
1724 	case AF_INET:
1725 		return ((__force u32)a->a4 ^ (__force u32)b->a4) == 0;
1726 	case AF_INET6:
1727 		return xfrm6_addr_equal(a, b);
1728 	}
1729 }
1730 
1731 static inline int xfrm_policy_id2dir(u32 index)
1732 {
1733 	return index & 7;
1734 }
1735 
1736 #ifdef CONFIG_XFRM
1737 void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq);
1738 int xfrm_replay_check(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);
1739 void xfrm_replay_notify(struct xfrm_state *x, int event);
1740 int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb);
1741 int xfrm_replay_recheck(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);
1742 
1743 static inline int xfrm_aevent_is_on(struct net *net)
1744 {
1745 	struct sock *nlsk;
1746 	int ret = 0;
1747 
1748 	rcu_read_lock();
1749 	nlsk = rcu_dereference(net->xfrm.nlsk);
1750 	if (nlsk)
1751 		ret = netlink_has_listeners(nlsk, XFRMNLGRP_AEVENTS);
1752 	rcu_read_unlock();
1753 	return ret;
1754 }
1755 
1756 static inline int xfrm_acquire_is_on(struct net *net)
1757 {
1758 	struct sock *nlsk;
1759 	int ret = 0;
1760 
1761 	rcu_read_lock();
1762 	nlsk = rcu_dereference(net->xfrm.nlsk);
1763 	if (nlsk)
1764 		ret = netlink_has_listeners(nlsk, XFRMNLGRP_ACQUIRE);
1765 	rcu_read_unlock();
1766 
1767 	return ret;
1768 }
1769 #endif
1770 
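/*
 * The following helpers return the total allocation size of an algorithm
 * descriptor: the structure itself plus the key material.  alg_key_len is
 * given in bits and is rounded up to whole bytes.
 */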
1771 static inline unsigned int aead_len(struct xfrm_algo_aead *alg)
1772 {
1773 	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1774 }
1775 
1776 static inline unsigned int xfrm_alg_len(const struct xfrm_algo *alg)
1777 {
1778 	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1779 }
1780 
1781 static inline unsigned int xfrm_alg_auth_len(const struct xfrm_algo_auth *alg)
1782 {
1783 	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1784 }
1785 
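/* Size of an ESN replay state: header plus bmp_len 32-bit bitmap words. */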
1786 static inline unsigned int xfrm_replay_state_esn_len(struct xfrm_replay_state_esn *replay_esn)
1787 {
1788 	return sizeof(*replay_esn) + replay_esn->bmp_len * sizeof(__u32);
1789 }
1790 
1791 #ifdef CONFIG_XFRM_MIGRATE
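/*
 * Clone helpers used when migrating a state: they duplicate the ESN replay
 * windows and the algorithm descriptors of @orig into a new state.  On
 * -ENOMEM the caller is expected to dispose of the partially set up state,
 * which releases anything already copied.
 */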
1792 static inline int xfrm_replay_clone(struct xfrm_state *x,
1793 				     struct xfrm_state *orig)
1794 {
1796 	x->replay_esn = kmemdup(orig->replay_esn,
1797 				xfrm_replay_state_esn_len(orig->replay_esn),
1798 				GFP_KERNEL);
1799 	if (!x->replay_esn)
1800 		return -ENOMEM;
1801 	x->preplay_esn = kmemdup(orig->preplay_esn,
1802 				 xfrm_replay_state_esn_len(orig->preplay_esn),
1803 				 GFP_KERNEL);
1804 	if (!x->preplay_esn)
1805 		return -ENOMEM;
1806 
1807 	return 0;
1808 }
1809 
1810 static inline struct xfrm_algo_aead *xfrm_algo_aead_clone(struct xfrm_algo_aead *orig)
1811 {
1812 	return kmemdup(orig, aead_len(orig), GFP_KERNEL);
1813 }
1814 
1816 static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
1817 {
1818 	return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
1819 }
1820 
1821 static inline struct xfrm_algo_auth *xfrm_algo_auth_clone(struct xfrm_algo_auth *orig)
1822 {
1823 	return kmemdup(orig, xfrm_alg_auth_len(orig), GFP_KERNEL);
1824 }
1825 
1826 static inline void xfrm_states_put(struct xfrm_state **states, int n)
1827 {
1828 	int i;
1829 	for (i = 0; i < n; i++)
1830 		xfrm_state_put(states[i]);
1831 }
1832 
1833 static inline void xfrm_states_delete(struct xfrm_state **states, int n)
1834 {
1835 	int i;
1836 	for (i = 0; i < n; i++)
1837 		xfrm_state_delete(states[i]);
1838 }
1839 #endif
1840 
1841 #ifdef CONFIG_XFRM
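/*
 * Return the state most recently added to the skb's sec_path, i.e. the
 * transform currently being processed on input.  The caller must ensure
 * that the sec_path exists and is non-empty.
 */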
1842 static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
1843 {
1844 	struct sec_path *sp = skb_sec_path(skb);
1845 
1846 	return sp->xvec[sp->len - 1];
1847 }
1848 #endif
1849 
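/*
 * Return the crypto offload context for @skb, but only if every sec_path
 * entry carries offload information (sp->len == sp->olen); otherwise
 * return NULL so the packet is handled by the software path.
 */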
1850 static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
1851 {
1852 #ifdef CONFIG_XFRM
1853 	struct sec_path *sp = skb_sec_path(skb);
1854 
1855 	if (!sp || !sp->olen || sp->len != sp->olen)
1856 		return NULL;
1857 
1858 	return &sp->ovec[sp->olen - 1];
1859 #else
1860 	return NULL;
1861 #endif
1862 }
1863 
1864 void __init xfrm_dev_init(void);
1865 
1866 #ifdef CONFIG_XFRM_OFFLOAD
1867 void xfrm_dev_resume(struct sk_buff *skb);
1868 void xfrm_dev_backlog(struct softnet_data *sd);
1869 struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
1870 int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
1871 		       struct xfrm_user_offload *xuo);
1872 bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
1873 
1874 static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
1875 {
1876 	struct xfrm_state_offload *xso = &x->xso;
1877 
1878 	if (xso->dev && xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn)
1879 		xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn(x);
1880 }
1881 
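/*
 * A bundle may be handed to the hardware only when the state has an
 * offloadable type, there is no nested transform below this dst, and
 * either no offload handle has been set up yet or the offloading device
 * matches the device of the path destination.
 */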
1882 static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
1883 {
1884 	struct xfrm_state *x = dst->xfrm;
1885 	struct xfrm_dst *xdst;
1886 
1887 	if (!x || !x->type_offload)
1888 		return false;
1889 
1890 	xdst = (struct xfrm_dst *) dst;
1891 	if (!x->xso.offload_handle && !xdst->child->xfrm)
1892 		return true;
1893 	if (x->xso.offload_handle && (x->xso.dev == xfrm_dst_path(dst)->dev) &&
1894 	    !xdst->child->xfrm)
1895 		return true;
1896 
1897 	return false;
1898 }
1899 
1900 static inline void xfrm_dev_state_delete(struct xfrm_state *x)
1901 {
1902 	struct xfrm_state_offload *xso = &x->xso;
1903 
1904 	if (xso->dev)
1905 		xso->dev->xfrmdev_ops->xdo_dev_state_delete(x);
1906 }
1907 
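/*
 * Let the driver release its state (if it implements xdo_dev_state_free)
 * and drop the tracked device reference taken when the offload was
 * configured.
 */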
1908 static inline void xfrm_dev_state_free(struct xfrm_state *x)
1909 {
1910 	struct xfrm_state_offload *xso = &x->xso;
1911 	struct net_device *dev = xso->dev;
1912 
1913 	if (dev && dev->xfrmdev_ops) {
1914 		if (dev->xfrmdev_ops->xdo_dev_state_free)
1915 			dev->xfrmdev_ops->xdo_dev_state_free(x);
1916 		xso->dev = NULL;
1917 		dev_put_track(dev, &xso->dev_tracker);
1918 	}
1919 }
1920 #else
1921 static inline void xfrm_dev_resume(struct sk_buff *skb)
1922 {
1923 }
1924 
1925 static inline void xfrm_dev_backlog(struct softnet_data *sd)
1926 {
1927 }
1928 
1929 static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
1930 {
1931 	return skb;
1932 }
1933 
1934 static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo)
1935 {
1936 	return 0;
1937 }
1938 
1939 static inline void xfrm_dev_state_delete(struct xfrm_state *x)
1940 {
1941 }
1942 
1943 static inline void xfrm_dev_state_free(struct xfrm_state *x)
1944 {
1945 }
1946 
1947 static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
1948 {
1949 	return false;
1950 }
1951 
1952 static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
1953 {
1954 }
1955 
1956 static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
1957 {
1958 	return false;
1959 }
1960 #endif
1961 
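/*
 * Parse the optional XFRMA_MARK attribute into @m (zeroing it when the
 * attribute is absent) and return the mark value masked by its mask.
 * A typical caller looks roughly like this (illustrative sketch):
 *
 *	struct xfrm_mark m;
 *	u32 mark = xfrm_mark_get(attrs, &m);
 */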
1962 static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
1963 {
1964 	if (attrs[XFRMA_MARK])
1965 		memcpy(m, nla_data(attrs[XFRMA_MARK]), sizeof(struct xfrm_mark));
1966 	else
1967 		m->v = m->m = 0;
1968 
1969 	return m->v & m->m;
1970 }
1971 
1972 static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
1973 {
1974 	int ret = 0;
1975 
1976 	if (m->m | m->v)
1977 		ret = nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
1978 	return ret;
1979 }
1980 
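/*
 * Combine the state's output mark with the original packet mark: bits
 * covered by the smark mask come from the state, the rest is preserved.
 * For example (illustrative values), with m->m == 0x0000ffff,
 * m->v == 0x00000042 and mark == 0xdead0000 the result is 0xdead0042.
 */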
1981 static inline __u32 xfrm_smark_get(__u32 mark, struct xfrm_state *x)
1982 {
1983 	struct xfrm_mark *m = &x->props.smark;
1984 
1985 	return (m->v & m->m) | (mark & ~m->m);
1986 }
1987 
1988 static inline int xfrm_if_id_put(struct sk_buff *skb, __u32 if_id)
1989 {
1990 	int ret = 0;
1991 
1992 	if (if_id)
1993 		ret = nla_put_u32(skb, XFRMA_IF_ID, if_id);
1994 	return ret;
1995 }
1996 
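/*
 * If the packet was decapsulated from an IPv4/IPv6 tunnel (recorded in
 * XFRM_TUNNEL_SKB_CB), the state that matched it must be a tunnel-mode
 * state; otherwise reject the packet with -EINVAL.
 */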
1997 static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
1998 				    unsigned int family)
1999 {
2000 	bool tunnel = false;
2001 
2002 	switch (family) {
2003 	case AF_INET:
2004 		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
2005 			tunnel = true;
2006 		break;
2007 	case AF_INET6:
2008 		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
2009 			tunnel = true;
2010 		break;
2011 	}
2012 	if (tunnel && !(x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL))
2013 		return -EINVAL;
2014 
2015 	return 0;
2016 }
2017 
2018 extern const int xfrm_msg_min[XFRM_NR_MSGTYPES];
2019 extern const struct nla_policy xfrma_policy[XFRMA_MAX+1];
2020 
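/*
 * Hooks registered by the 32/64-bit compat layer (CONFIG_XFRM_USER_COMPAT)
 * to translate xfrm netlink messages between the two ABIs.  Users obtain
 * the translator with xfrm_get_translator(), which pins the owning module,
 * and release it again with xfrm_put_translator().
 */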
2021 struct xfrm_translator {
2022 	/* Allocate frag_list and put compat translation there */
2023 	int (*alloc_compat)(struct sk_buff *skb, const struct nlmsghdr *src);
2024 
2025 	/* Allocate nlmsg with 64-bit translaton of received 32-bit message */
2026 	struct nlmsghdr *(*rcv_msg_compat)(const struct nlmsghdr *nlh,
2027 			int maxtype, const struct nla_policy *policy,
2028 			struct netlink_ext_ack *extack);
2029 
2030 	/* Translate 32-bit user_policy from sockptr */
2031 	int (*xlate_user_policy_sockptr)(u8 **pdata32, int optlen);
2032 
2033 	struct module *owner;
2034 };
2035 
2036 #if IS_ENABLED(CONFIG_XFRM_USER_COMPAT)
2037 extern int xfrm_register_translator(struct xfrm_translator *xtr);
2038 extern int xfrm_unregister_translator(struct xfrm_translator *xtr);
2039 extern struct xfrm_translator *xfrm_get_translator(void);
2040 extern void xfrm_put_translator(struct xfrm_translator *xtr);
2041 #else
2042 static inline struct xfrm_translator *xfrm_get_translator(void)
2043 {
2044 	return NULL;
2045 }
2046 static inline void xfrm_put_translator(struct xfrm_translator *xtr)
2047 {
2048 }
2049 #endif
2050 
2051 #if IS_ENABLED(CONFIG_IPV6)
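/*
 * True when the socket is an IPv6 UDP or IPPROTO_RAW socket that has
 * IPV6_DONTFRAG set (np->dontfrag), i.e. locally generated traffic that
 * must not be fragmented.
 */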
2052 static inline bool xfrm6_local_dontfrag(const struct sock *sk)
2053 {
2054 	int proto;
2055 
2056 	if (!sk || sk->sk_family != AF_INET6)
2057 		return false;
2058 
2059 	proto = sk->sk_protocol;
2060 	if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
2061 		return inet6_sk(sk)->dontfrag;
2062 
2063 	return false;
2064 }
2065 #endif
2066 #endif	/* _NET_XFRM_H */
2067