xref: /linux/include/net/xfrm.h (revision 93a3545d812ae7cfe4426374e00a7d8f64ac02e0)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _NET_XFRM_H
3 #define _NET_XFRM_H
4 
5 #include <linux/compiler.h>
6 #include <linux/xfrm.h>
7 #include <linux/spinlock.h>
8 #include <linux/list.h>
9 #include <linux/skbuff.h>
10 #include <linux/socket.h>
11 #include <linux/pfkeyv2.h>
12 #include <linux/ipsec.h>
13 #include <linux/in6.h>
14 #include <linux/mutex.h>
15 #include <linux/audit.h>
16 #include <linux/slab.h>
17 #include <linux/refcount.h>
18 
19 #include <net/sock.h>
20 #include <net/dst.h>
21 #include <net/ip.h>
22 #include <net/route.h>
23 #include <net/ipv6.h>
24 #include <net/ip6_fib.h>
25 #include <net/flow.h>
26 #include <net/gro_cells.h>
27 
28 #include <linux/interrupt.h>
29 
30 #ifdef CONFIG_XFRM_STATISTICS
31 #include <net/snmp.h>
32 #endif
33 
34 #define XFRM_PROTO_ESP		50
35 #define XFRM_PROTO_AH		51
36 #define XFRM_PROTO_COMP		108
37 #define XFRM_PROTO_IPIP		4
38 #define XFRM_PROTO_IPV6		41
39 #define XFRM_PROTO_ROUTING	IPPROTO_ROUTING
40 #define XFRM_PROTO_DSTOPTS	IPPROTO_DSTOPTS
41 
42 #define XFRM_ALIGN4(len)	(((len) + 3) & ~3)
43 #define XFRM_ALIGN8(len)	(((len) + 7) & ~7)
44 #define MODULE_ALIAS_XFRM_MODE(family, encap) \
45 	MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
46 #define MODULE_ALIAS_XFRM_TYPE(family, proto) \
47 	MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto))
48 #define MODULE_ALIAS_XFRM_OFFLOAD_TYPE(family, proto) \
49 	MODULE_ALIAS("xfrm-offload-" __stringify(family) "-" __stringify(proto))
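/*
 * As an illustration of the alias macros above (a sketch, assuming the usual
 * AF_INET == 2 from <linux/socket.h>), declaring
 *
 *	MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP)
 *
 * expands to MODULE_ALIAS("xfrm-type-2-50"), which is what lets the xfrm core
 * auto-load the module for that family/protocol pair via request_module().
 */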
50 
51 #ifdef CONFIG_XFRM_STATISTICS
52 #define XFRM_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
53 #else
54 #define XFRM_INC_STATS(net, field)	((void)(net))
55 #endif
56 
57 
58 /* Organization of SPD aka "XFRM rules"
59    ------------------------------------
60 
61    Basic objects:
62    - policy rule, struct xfrm_policy (=SPD entry)
63    - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
64    - instance of a transformer, struct xfrm_state (=SA)
65    - template to clone xfrm_state, struct xfrm_tmpl
66 
67    The SPD is a plain linear list of xfrm_policy rules, ordered by priority.
68    (To stay compatible with existing pfkeyv2 implementations, many rules
69    with a priority of 0x7fffffff are allowed to exist, and such rules are
70    ordered in an unpredictable way, thanks to the BSD folks.)
71 
72    Lookup is a plain linear search until the first selector match.
73 
74    If "action" is "block", then we prohibit the flow, otherwise:
75    if "xfrms_nr" is zero, the flow passes untransformed. Otherwise,
76    policy entry has list of up to XFRM_MAX_DEPTH transformations,
77    described by templates xfrm_tmpl. Each template is resolved
78    to a complete xfrm_state (see below) and we pack bundle of transformations
79    to a dst_entry returned to requestor.
80 
81    dst -. xfrm  .-> xfrm_state #1
82     |---. child .-> dst -. xfrm .-> xfrm_state #2
83                      |---. child .-> dst -. xfrm .-> xfrm_state #3
84                                       |---. child .-> NULL
85 
86    Bundles are cached in the xfrm_policy struct (field ->bundles).
87 
88 
89    Resolution of xfrm_tmpl
90    -----------------------
91    Template contains:
92    1. ->mode		Mode: transport or tunnel
93    2. ->id.proto	Protocol: AH/ESP/IPCOMP
94    3. ->id.daddr	Remote tunnel endpoint, ignored for transport mode.
95       Q: should we allow resolving a security gateway?
96    4. ->id.spi          If not zero, static SPI.
97    5. ->saddr		Local tunnel endpoint, ignored for transport mode.
98    6. ->algos		List of allowed algos. Plain bitmask now.
99       Q: ealgos, aalgos, calgos. What a mess...
100    7. ->share		Sharing mode.
101       Q: how do we implement a private sharing mode? By adding a
102       struct sock* to the flow id?
103 
104    Given this template we search the SAD for entries with the
105    appropriate mode/proto/algo that are permitted by the selector.
106    If no appropriate entry is found, one is requested from the key manager.
107 
108    PROBLEMS:
109    Q: How do we find all the bundles referring to a physical path, for
110       PMTU discovery? It seems the dst would need a list of all parents...
111       and we would descend into an infinite locking-hierarchy disaster.
112       No! It is easier: we will not search for them, we let them find us.
113       We add a genid to each dst plus a pointer to the genid of the raw IP
114       route; PMTU discovery updates the PMTU on the raw IP route and bumps
115       its genid. dst_check() will see this at the top level and trigger a
116       resync of the metrics. Plus, it all goes via sk->sk_dst_cache. Solved.
117  */
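/*
 * A minimal sketch of walking the bundle layout drawn above (illustration
 * only; xfrm_dst_child() is defined further down in this header, and
 * bundle_depth() is just a hypothetical name for the walk):
 *
 *	static unsigned int bundle_depth(struct dst_entry *dst)
 *	{
 *		unsigned int n = 0;
 *
 *		while (dst) {
 *			if (dst->xfrm)			// the SA applied at this level
 *				n++;
 *			dst = xfrm_dst_child(dst);	// next element, NULL at the end
 *		}
 *		return n;
 *	}
 */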
118 
119 struct xfrm_state_walk {
120 	struct list_head	all;
121 	u8			state;
122 	u8			dying;
123 	u8			proto;
124 	u32			seq;
125 	struct xfrm_address_filter *filter;
126 };
127 
128 struct xfrm_state_offload {
129 	struct net_device	*dev;
130 	struct net_device	*real_dev;
131 	unsigned long		offload_handle;
132 	unsigned int		num_exthdrs;
133 	u8			flags;
134 };
135 
136 struct xfrm_mode {
137 	u8 encap;
138 	u8 family;
139 	u8 flags;
140 };
141 
142 /* Flags for xfrm_mode. */
143 enum {
144 	XFRM_MODE_FLAG_TUNNEL = 1,
145 };
146 
147 /* Full description of state of transformer. */
148 struct xfrm_state {
149 	possible_net_t		xs_net;
150 	union {
151 		struct hlist_node	gclist;
152 		struct hlist_node	bydst;
153 	};
154 	struct hlist_node	bysrc;
155 	struct hlist_node	byspi;
156 
157 	refcount_t		refcnt;
158 	spinlock_t		lock;
159 
160 	struct xfrm_id		id;
161 	struct xfrm_selector	sel;
162 	struct xfrm_mark	mark;
163 	u32			if_id;
164 	u32			tfcpad;
165 
166 	u32			genid;
167 
168 	/* Key manager bits */
169 	struct xfrm_state_walk	km;
170 
171 	/* Parameters of this state. */
172 	struct {
173 		u32		reqid;
174 		u8		mode;
175 		u8		replay_window;
176 		u8		aalgo, ealgo, calgo;
177 		u8		flags;
178 		u16		family;
179 		xfrm_address_t	saddr;
180 		int		header_len;
181 		int		trailer_len;
182 		u32		extra_flags;
183 		struct xfrm_mark	smark;
184 	} props;
185 
186 	struct xfrm_lifetime_cfg lft;
187 
188 	/* Data for transformer */
189 	struct xfrm_algo_auth	*aalg;
190 	struct xfrm_algo	*ealg;
191 	struct xfrm_algo	*calg;
192 	struct xfrm_algo_aead	*aead;
193 	const char		*geniv;
194 
195 	/* Data for encapsulator */
196 	struct xfrm_encap_tmpl	*encap;
197 	struct sock __rcu	*encap_sk;
198 
199 	/* Data for care-of address */
200 	xfrm_address_t	*coaddr;
201 
202 	/* IPComp needs an IPIP tunnel for handling uncompressed packets */
203 	struct xfrm_state	*tunnel;
204 
205 	/* If a tunnel, number of users + 1 */
206 	atomic_t		tunnel_users;
207 
208 	/* State for replay detection */
209 	struct xfrm_replay_state replay;
210 	struct xfrm_replay_state_esn *replay_esn;
211 
212 	/* Replay detection state at the time we sent the last notification */
213 	struct xfrm_replay_state preplay;
214 	struct xfrm_replay_state_esn *preplay_esn;
215 
216 	/* The functions for replay detection. */
217 	const struct xfrm_replay *repl;
218 
219 	/* Internal flags; at the moment they only hold state for the
220 	 * delayed aevent.
221 	 */
222 	u32			xflags;
223 
224 	/* Replay detection notification settings */
225 	u32			replay_maxage;
226 	u32			replay_maxdiff;
227 
228 	/* Replay detection notification timer */
229 	struct timer_list	rtimer;
230 
231 	/* Statistics */
232 	struct xfrm_stats	stats;
233 
234 	struct xfrm_lifetime_cur curlft;
235 	struct hrtimer		mtimer;
236 
237 	struct xfrm_state_offload xso;
238 
239 	/* used to fix curlft->add_time when changing date */
240 	long		saved_tmo;
241 
242 	/* Last used time */
243 	time64_t		lastused;
244 
245 	struct page_frag xfrag;
246 
247 	/* Reference to data common to all the instances of this
248 	 * transformer. */
249 	const struct xfrm_type	*type;
250 	struct xfrm_mode	inner_mode;
251 	struct xfrm_mode	inner_mode_iaf;
252 	struct xfrm_mode	outer_mode;
253 
254 	const struct xfrm_type_offload	*type_offload;
255 
256 	/* Security context */
257 	struct xfrm_sec_ctx	*security;
258 
259 	/* Private data of this transformer, format is opaque,
260 	 * interpreted by xfrm_type methods. */
261 	void			*data;
262 };
263 
264 static inline struct net *xs_net(struct xfrm_state *x)
265 {
266 	return read_pnet(&x->xs_net);
267 }
268 
269 /* xflags - make enum if more show up */
270 #define XFRM_TIME_DEFER	1
271 #define XFRM_SOFT_EXPIRE 2
272 
273 enum {
274 	XFRM_STATE_VOID,
275 	XFRM_STATE_ACQ,
276 	XFRM_STATE_VALID,
277 	XFRM_STATE_ERROR,
278 	XFRM_STATE_EXPIRED,
279 	XFRM_STATE_DEAD
280 };
281 
282 /* callback structure passed from either netlink or pfkey */
283 struct km_event {
284 	union {
285 		u32 hard;
286 		u32 proto;
287 		u32 byid;
288 		u32 aevent;
289 		u32 type;
290 	} data;
291 
292 	u32	seq;
293 	u32	portid;
294 	u32	event;
295 	struct net *net;
296 };
297 
298 struct xfrm_replay {
299 	void	(*advance)(struct xfrm_state *x, __be32 net_seq);
300 	int	(*check)(struct xfrm_state *x,
301 			 struct sk_buff *skb,
302 			 __be32 net_seq);
303 	int	(*recheck)(struct xfrm_state *x,
304 			   struct sk_buff *skb,
305 			   __be32 net_seq);
306 	void	(*notify)(struct xfrm_state *x, int event);
307 	int	(*overflow)(struct xfrm_state *x, struct sk_buff *skb);
308 };
309 
310 struct xfrm_if_cb {
311 	struct xfrm_if	*(*decode_session)(struct sk_buff *skb,
312 					   unsigned short family);
313 };
314 
315 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
316 void xfrm_if_unregister_cb(void);
317 
318 struct net_device;
319 struct xfrm_type;
320 struct xfrm_dst;
321 struct xfrm_policy_afinfo {
322 	struct dst_ops		*dst_ops;
323 	struct dst_entry	*(*dst_lookup)(struct net *net,
324 					       int tos, int oif,
325 					       const xfrm_address_t *saddr,
326 					       const xfrm_address_t *daddr,
327 					       u32 mark);
328 	int			(*get_saddr)(struct net *net, int oif,
329 					     xfrm_address_t *saddr,
330 					     xfrm_address_t *daddr,
331 					     u32 mark);
332 	int			(*fill_dst)(struct xfrm_dst *xdst,
333 					    struct net_device *dev,
334 					    const struct flowi *fl);
335 	struct dst_entry	*(*blackhole_route)(struct net *net, struct dst_entry *orig);
336 };
337 
338 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family);
339 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo);
340 void km_policy_notify(struct xfrm_policy *xp, int dir,
341 		      const struct km_event *c);
342 void km_state_notify(struct xfrm_state *x, const struct km_event *c);
343 
344 struct xfrm_tmpl;
345 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t,
346 	     struct xfrm_policy *pol);
347 void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
348 int __xfrm_state_delete(struct xfrm_state *x);
349 
350 struct xfrm_state_afinfo {
351 	u8				family;
352 	u8				proto;
353 
354 	const struct xfrm_type_offload *type_offload_esp;
355 
356 	const struct xfrm_type		*type_esp;
357 	const struct xfrm_type		*type_ipip;
358 	const struct xfrm_type		*type_ipip6;
359 	const struct xfrm_type		*type_comp;
360 	const struct xfrm_type		*type_ah;
361 	const struct xfrm_type		*type_routing;
362 	const struct xfrm_type		*type_dstopts;
363 
364 	int			(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
365 	int			(*transport_finish)(struct sk_buff *skb,
366 						    int async);
367 	void			(*local_error)(struct sk_buff *skb, u32 mtu);
368 };
369 
370 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
371 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
372 struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
373 struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family);
374 
375 struct xfrm_input_afinfo {
376 	unsigned int		family;
377 	int			(*callback)(struct sk_buff *skb, u8 protocol,
378 					    int err);
379 };
380 
381 int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo);
382 int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo);
383 
384 void xfrm_flush_gc(void);
385 void xfrm_state_delete_tunnel(struct xfrm_state *x);
386 
387 struct xfrm_type {
388 	char			*description;
389 	struct module		*owner;
390 	u8			proto;
391 	u8			flags;
392 #define XFRM_TYPE_NON_FRAGMENT	1
393 #define XFRM_TYPE_REPLAY_PROT	2
394 #define XFRM_TYPE_LOCAL_COADDR	4
395 #define XFRM_TYPE_REMOTE_COADDR	8
396 
397 	int			(*init_state)(struct xfrm_state *x);
398 	void			(*destructor)(struct xfrm_state *);
399 	int			(*input)(struct xfrm_state *, struct sk_buff *skb);
400 	int			(*output)(struct xfrm_state *, struct sk_buff *pskb);
401 	int			(*reject)(struct xfrm_state *, struct sk_buff *,
402 					  const struct flowi *);
403 	int			(*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **);
404 };
405 
406 int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
407 void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
408 
409 struct xfrm_type_offload {
410 	char		*description;
411 	struct module	*owner;
412 	u8		proto;
413 	void		(*encap)(struct xfrm_state *, struct sk_buff *pskb);
414 	int		(*input_tail)(struct xfrm_state *x, struct sk_buff *skb);
415 	int		(*xmit)(struct xfrm_state *, struct sk_buff *pskb, netdev_features_t features);
416 };
417 
418 int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family);
419 void xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family);
420 
421 static inline int xfrm_af2proto(unsigned int family)
422 {
423 	switch(family) {
424 	case AF_INET:
425 		return IPPROTO_IPIP;
426 	case AF_INET6:
427 		return IPPROTO_IPV6;
428 	default:
429 		return 0;
430 	}
431 }
432 
433 static inline const struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto)
434 {
435 	if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
436 	    (ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6))
437 		return &x->inner_mode;
438 	else
439 		return &x->inner_mode_iaf;
440 }
441 
442 struct xfrm_tmpl {
443 /* id in template is interpreted as:
444  * daddr - destination of tunnel, may be zero for transport mode.
445  * spi   - zero to acquire an SPI; non-zero if the SPI is static, in
446  *	   which case daddr must be fixed too.
447  * proto - AH/ESP/IPCOMP
448  */
449 	struct xfrm_id		id;
450 
451 /* Source address of tunnel. Ignored if it is not a tunnel. */
452 	xfrm_address_t		saddr;
453 
454 	unsigned short		encap_family;
455 
456 	u32			reqid;
457 
458 /* Mode: transport, tunnel etc. */
459 	u8			mode;
460 
461 /* Sharing mode: unique, this session only, this user only etc. */
462 	u8			share;
463 
464 /* May skip this transformation if no SA is found */
465 	u8			optional;
466 
467 /* Skip aalgos/ealgos/calgos checks. */
468 	u8			allalgs;
469 
470 /* Bit mask of algos allowed for acquisition */
471 	u32			aalgos;
472 	u32			ealgos;
473 	u32			calgos;
474 };
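/*
 * A minimal sketch of a tunnel-mode ESP template using the fields above
 * (illustration only; the chosen values are just an example):
 *
 *	struct xfrm_tmpl t = {
 *		.id.proto	= IPPROTO_ESP,
 *		.mode		= XFRM_MODE_TUNNEL,
 *		.reqid		= 1,	// tie the template to matching SAs
 *		.optional	= 0,	// an SA must be found for this step
 *	};
 */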
475 
476 #define XFRM_MAX_DEPTH		6
477 #define XFRM_MAX_OFFLOAD_DEPTH	1
478 
479 struct xfrm_policy_walk_entry {
480 	struct list_head	all;
481 	u8			dead;
482 };
483 
484 struct xfrm_policy_walk {
485 	struct xfrm_policy_walk_entry walk;
486 	u8 type;
487 	u32 seq;
488 };
489 
490 struct xfrm_policy_queue {
491 	struct sk_buff_head	hold_queue;
492 	struct timer_list	hold_timer;
493 	unsigned long		timeout;
494 };
495 
496 struct xfrm_policy {
497 	possible_net_t		xp_net;
498 	struct hlist_node	bydst;
499 	struct hlist_node	byidx;
500 
501 	/* This lock protects every field except the list entry. */
502 	rwlock_t		lock;
503 	refcount_t		refcnt;
504 	u32			pos;
505 	struct timer_list	timer;
506 
507 	atomic_t		genid;
508 	u32			priority;
509 	u32			index;
510 	u32			if_id;
511 	struct xfrm_mark	mark;
512 	struct xfrm_selector	selector;
513 	struct xfrm_lifetime_cfg lft;
514 	struct xfrm_lifetime_cur curlft;
515 	struct xfrm_policy_walk_entry walk;
516 	struct xfrm_policy_queue polq;
517 	bool                    bydst_reinsert;
518 	u8			type;
519 	u8			action;
520 	u8			flags;
521 	u8			xfrm_nr;
522 	u16			family;
523 	struct xfrm_sec_ctx	*security;
524 	struct xfrm_tmpl       	xfrm_vec[XFRM_MAX_DEPTH];
525 	struct hlist_node	bydst_inexact_list;
526 	struct rcu_head		rcu;
527 };
528 
529 static inline struct net *xp_net(const struct xfrm_policy *xp)
530 {
531 	return read_pnet(&xp->xp_net);
532 }
533 
534 struct xfrm_kmaddress {
535 	xfrm_address_t          local;
536 	xfrm_address_t          remote;
537 	u32			reserved;
538 	u16			family;
539 };
540 
541 struct xfrm_migrate {
542 	xfrm_address_t		old_daddr;
543 	xfrm_address_t		old_saddr;
544 	xfrm_address_t		new_daddr;
545 	xfrm_address_t		new_saddr;
546 	u8			proto;
547 	u8			mode;
548 	u16			reserved;
549 	u32			reqid;
550 	u16			old_family;
551 	u16			new_family;
552 };
553 
554 #define XFRM_KM_TIMEOUT                30
555 /* what happened */
556 #define XFRM_REPLAY_UPDATE	XFRM_AE_CR
557 #define XFRM_REPLAY_TIMEOUT	XFRM_AE_CE
558 
559 /* default aevent timeout in units of 100ms */
560 #define XFRM_AE_ETIME			10
561 /* Async Event timer multiplier */
562 #define XFRM_AE_ETH_M			10
563 /* default seq threshold size */
564 #define XFRM_AE_SEQT_SIZE		2
565 
566 struct xfrm_mgr {
567 	struct list_head	list;
568 	int			(*notify)(struct xfrm_state *x, const struct km_event *c);
569 	int			(*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp);
570 	struct xfrm_policy	*(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
571 	int			(*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
572 	int			(*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c);
573 	int			(*report)(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
574 	int			(*migrate)(const struct xfrm_selector *sel,
575 					   u8 dir, u8 type,
576 					   const struct xfrm_migrate *m,
577 					   int num_bundles,
578 					   const struct xfrm_kmaddress *k,
579 					   const struct xfrm_encap_tmpl *encap);
580 	bool			(*is_alive)(const struct km_event *c);
581 };
582 
583 int xfrm_register_km(struct xfrm_mgr *km);
584 int xfrm_unregister_km(struct xfrm_mgr *km);
585 
586 struct xfrm_tunnel_skb_cb {
587 	union {
588 		struct inet_skb_parm h4;
589 		struct inet6_skb_parm h6;
590 	} header;
591 
592 	union {
593 		struct ip_tunnel *ip4;
594 		struct ip6_tnl *ip6;
595 	} tunnel;
596 };
597 
598 #define XFRM_TUNNEL_SKB_CB(__skb) ((struct xfrm_tunnel_skb_cb *)&((__skb)->cb[0]))
599 
600 /*
601  * This structure is used while packets are being transformed by
602  * IPsec.  As soon as the packet leaves IPsec, the area beyond the
603  * generic IP part may be overwritten.
604  */
605 struct xfrm_skb_cb {
606 	struct xfrm_tunnel_skb_cb header;
607 
608 	/* Sequence number for replay protection. */
609 	union {
610 		struct {
611 			__u32 low;
612 			__u32 hi;
613 		} output;
614 		struct {
615 			__be32 low;
616 			__be32 hi;
617 		} input;
618 	} seq;
619 };
620 
621 #define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0]))
622 
623 /*
624  * This structure is used by the afinfo prepare_input/prepare_output functions
625  * to transmit header information to the mode input/output functions.
626  */
627 struct xfrm_mode_skb_cb {
628 	struct xfrm_tunnel_skb_cb header;
629 
630 	/* Copied from header for IPv4, always set to zero and DF for IPv6. */
631 	__be16 id;
632 	__be16 frag_off;
633 
634 	/* IP header length (excluding options or extension headers). */
635 	u8 ihl;
636 
637 	/* TOS for IPv4, class for IPv6. */
638 	u8 tos;
639 
640 	/* TTL for IPv4, hop limit for IPv6. */
641 	u8 ttl;
642 
643 	/* Protocol for IPv4, NH for IPv6. */
644 	u8 protocol;
645 
646 	/* Option length for IPv4, zero for IPv6. */
647 	u8 optlen;
648 
649 	/* Used by IPv6 only, zero for IPv4. */
650 	u8 flow_lbl[3];
651 };
652 
653 #define XFRM_MODE_SKB_CB(__skb) ((struct xfrm_mode_skb_cb *)&((__skb)->cb[0]))
654 
655 /*
656  * This structure is used by the input processing to locate the SPI and
657  * related information.
658  */
659 struct xfrm_spi_skb_cb {
660 	struct xfrm_tunnel_skb_cb header;
661 
662 	unsigned int daddroff;
663 	unsigned int family;
664 	__be32 seq;
665 };
666 
667 #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
668 
669 #ifdef CONFIG_AUDITSYSCALL
670 static inline struct audit_buffer *xfrm_audit_start(const char *op)
671 {
672 	struct audit_buffer *audit_buf = NULL;
673 
674 	if (audit_enabled == AUDIT_OFF)
675 		return NULL;
676 	audit_buf = audit_log_start(audit_context(), GFP_ATOMIC,
677 				    AUDIT_MAC_IPSEC_EVENT);
678 	if (audit_buf == NULL)
679 		return NULL;
680 	audit_log_format(audit_buf, "op=%s", op);
681 	return audit_buf;
682 }
683 
684 static inline void xfrm_audit_helper_usrinfo(bool task_valid,
685 					     struct audit_buffer *audit_buf)
686 {
687 	const unsigned int auid = from_kuid(&init_user_ns, task_valid ?
688 					    audit_get_loginuid(current) :
689 					    INVALID_UID);
690 	const unsigned int ses = task_valid ? audit_get_sessionid(current) :
691 		AUDIT_SID_UNSET;
692 
693 	audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses);
694 	audit_log_task_context(audit_buf);
695 }
696 
697 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid);
698 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
699 			      bool task_valid);
700 void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid);
701 void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid);
702 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
703 				      struct sk_buff *skb);
704 void xfrm_audit_state_replay(struct xfrm_state *x, struct sk_buff *skb,
705 			     __be32 net_seq);
706 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
707 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, __be32 net_spi,
708 			       __be32 net_seq);
709 void xfrm_audit_state_icvfail(struct xfrm_state *x, struct sk_buff *skb,
710 			      u8 proto);
711 #else
712 
713 static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
714 					 bool task_valid)
715 {
716 }
717 
718 static inline void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
719 					    bool task_valid)
720 {
721 }
722 
723 static inline void xfrm_audit_state_add(struct xfrm_state *x, int result,
724 					bool task_valid)
725 {
726 }
727 
728 static inline void xfrm_audit_state_delete(struct xfrm_state *x, int result,
729 					   bool task_valid)
730 {
731 }
732 
733 static inline void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
734 					     struct sk_buff *skb)
735 {
736 }
737 
738 static inline void xfrm_audit_state_replay(struct xfrm_state *x,
739 					   struct sk_buff *skb, __be32 net_seq)
740 {
741 }
742 
743 static inline void xfrm_audit_state_notfound_simple(struct sk_buff *skb,
744 				      u16 family)
745 {
746 }
747 
748 static inline void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
749 				      __be32 net_spi, __be32 net_seq)
750 {
751 }
752 
753 static inline void xfrm_audit_state_icvfail(struct xfrm_state *x,
754 				     struct sk_buff *skb, u8 proto)
755 {
756 }
757 #endif /* CONFIG_AUDITSYSCALL */
758 
759 static inline void xfrm_pol_hold(struct xfrm_policy *policy)
760 {
761 	if (likely(policy != NULL))
762 		refcount_inc(&policy->refcnt);
763 }
764 
765 void xfrm_policy_destroy(struct xfrm_policy *policy);
766 
767 static inline void xfrm_pol_put(struct xfrm_policy *policy)
768 {
769 	if (refcount_dec_and_test(&policy->refcnt))
770 		xfrm_policy_destroy(policy);
771 }
772 
773 static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
774 {
775 	int i;
776 	for (i = npols - 1; i >= 0; --i)
777 		xfrm_pol_put(pols[i]);
778 }
779 
780 void __xfrm_state_destroy(struct xfrm_state *, bool);
781 
782 static inline void __xfrm_state_put(struct xfrm_state *x)
783 {
784 	refcount_dec(&x->refcnt);
785 }
786 
787 static inline void xfrm_state_put(struct xfrm_state *x)
788 {
789 	if (refcount_dec_and_test(&x->refcnt))
790 		__xfrm_state_destroy(x, false);
791 }
792 
793 static inline void xfrm_state_put_sync(struct xfrm_state *x)
794 {
795 	if (refcount_dec_and_test(&x->refcnt))
796 		__xfrm_state_destroy(x, true);
797 }
798 
799 static inline void xfrm_state_hold(struct xfrm_state *x)
800 {
801 	refcount_inc(&x->refcnt);
802 }
803 
804 static inline bool addr_match(const void *token1, const void *token2,
805 			      unsigned int prefixlen)
806 {
807 	const __be32 *a1 = token1;
808 	const __be32 *a2 = token2;
809 	unsigned int pdw;
810 	unsigned int pbi;
811 
812 	pdw = prefixlen >> 5;	  /* num of whole u32 in prefix */
813 	pbi = prefixlen &  0x1f;  /* num of bits in incomplete u32 in prefix */
814 
815 	if (pdw)
816 		if (memcmp(a1, a2, pdw << 2))
817 			return false;
818 
819 	if (pbi) {
820 		__be32 mask;
821 
822 		mask = htonl((0xffffffff) << (32 - pbi));
823 
824 		if ((a1[pdw] ^ a2[pdw]) & mask)
825 			return false;
826 	}
827 
828 	return true;
829 }
830 
831 static inline bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen)
832 {
833 	/* C99 6.5.7 (3): u32 << 32 is undefined behaviour */
834 	if (sizeof(long) == 4 && prefixlen == 0)
835 		return true;
836 	return !((a1 ^ a2) & htonl(~0UL << (32 - prefixlen)));
837 }
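/*
 * A worked example of the prefix match above (values illustrative): with
 * prefixlen 24 the mask covers only the top 24 bits, so 192.168.1.1 and
 * 192.168.1.200 compare equal, while a /32 comparison does not:
 *
 *	addr4_match(htonl(0xc0a80101), htonl(0xc0a801c8), 24);	// true
 *	addr4_match(htonl(0xc0a80101), htonl(0xc0a801c8), 32);	// false
 */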
838 
839 static __inline__
840 __be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli)
841 {
842 	__be16 port;
843 	switch(fl->flowi_proto) {
844 	case IPPROTO_TCP:
845 	case IPPROTO_UDP:
846 	case IPPROTO_UDPLITE:
847 	case IPPROTO_SCTP:
848 		port = uli->ports.sport;
849 		break;
850 	case IPPROTO_ICMP:
851 	case IPPROTO_ICMPV6:
852 		port = htons(uli->icmpt.type);
853 		break;
854 	case IPPROTO_MH:
855 		port = htons(uli->mht.type);
856 		break;
857 	case IPPROTO_GRE:
858 		port = htons(ntohl(uli->gre_key) >> 16);
859 		break;
860 	default:
861 		port = 0;	/*XXX*/
862 	}
863 	return port;
864 }
865 
866 static __inline__
867 __be16 xfrm_flowi_dport(const struct flowi *fl, const union flowi_uli *uli)
868 {
869 	__be16 port;
870 	switch(fl->flowi_proto) {
871 	case IPPROTO_TCP:
872 	case IPPROTO_UDP:
873 	case IPPROTO_UDPLITE:
874 	case IPPROTO_SCTP:
875 		port = uli->ports.dport;
876 		break;
877 	case IPPROTO_ICMP:
878 	case IPPROTO_ICMPV6:
879 		port = htons(uli->icmpt.code);
880 		break;
881 	case IPPROTO_GRE:
882 		port = htons(ntohl(uli->gre_key) & 0xffff);
883 		break;
884 	default:
885 		port = 0;	/*XXX*/
886 	}
887 	return port;
888 }
889 
890 bool xfrm_selector_match(const struct xfrm_selector *sel,
891 			 const struct flowi *fl, unsigned short family);
892 
893 #ifdef CONFIG_SECURITY_NETWORK_XFRM
894 /*	If neither has a context --> match
895  * 	Otherwise, both must have a context and the sids, doi, alg must match
896  */
897 static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
898 {
899 	return ((!s1 && !s2) ||
900 		(s1 && s2 &&
901 		 (s1->ctx_sid == s2->ctx_sid) &&
902 		 (s1->ctx_doi == s2->ctx_doi) &&
903 		 (s1->ctx_alg == s2->ctx_alg)));
904 }
905 #else
906 static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
907 {
908 	return true;
909 }
910 #endif
911 
912 /* A struct encoding a bundle of transformations to apply to some flow.
913  *
914  * xdst->child points to the next element of the bundle.
915  * dst->xfrm   points to an instance of a transformer.
916  *
917  * Due to unfortunate limitations of the current routing cache, which we
918  * have no time to fix, it mirrors struct rtable and is bound to the same
919  * routing key, including saddr, daddr. However, we can have many bundles
920  * differing by session id. All the bundles grow from a parent
921  * policy rule.
922  */
923 struct xfrm_dst {
924 	union {
925 		struct dst_entry	dst;
926 		struct rtable		rt;
927 		struct rt6_info		rt6;
928 	} u;
929 	struct dst_entry *route;
930 	struct dst_entry *child;
931 	struct dst_entry *path;
932 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
933 	int num_pols, num_xfrms;
934 	u32 xfrm_genid;
935 	u32 policy_genid;
936 	u32 route_mtu_cached;
937 	u32 child_mtu_cached;
938 	u32 route_cookie;
939 	u32 path_cookie;
940 };
941 
942 static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
943 {
944 #ifdef CONFIG_XFRM
945 	if (dst->xfrm) {
946 		const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst;
947 
948 		return xdst->path;
949 	}
950 #endif
951 	return (struct dst_entry *) dst;
952 }
953 
954 static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
955 {
956 #ifdef CONFIG_XFRM
957 	if (dst->xfrm) {
958 		struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
959 		return xdst->child;
960 	}
961 #endif
962 	return NULL;
963 }
964 
965 #ifdef CONFIG_XFRM
966 static inline void xfrm_dst_set_child(struct xfrm_dst *xdst, struct dst_entry *child)
967 {
968 	xdst->child = child;
969 }
970 
971 static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
972 {
973 	xfrm_pols_put(xdst->pols, xdst->num_pols);
974 	dst_release(xdst->route);
975 	if (likely(xdst->u.dst.xfrm))
976 		xfrm_state_put(xdst->u.dst.xfrm);
977 }
978 #endif
979 
980 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
981 
982 struct xfrm_if_parms {
983 	int link;		/* ifindex of underlying L2 interface */
984 	u32 if_id;		/* interface identifier */
985 };
986 
987 struct xfrm_if {
988 	struct xfrm_if __rcu *next;	/* next interface in list */
989 	struct net_device *dev;		/* virtual device associated with interface */
990 	struct net *net;		/* netns for packet i/o */
991 	struct xfrm_if_parms p;		/* interface parms */
992 
993 	struct gro_cells gro_cells;
994 };
995 
996 struct xfrm_offload {
997 	/* Output sequence number for replay protection on offloading. */
998 	struct {
999 		__u32 low;
1000 		__u32 hi;
1001 	} seq;
1002 
1003 	__u32			flags;
1004 #define	SA_DELETE_REQ		1
1005 #define	CRYPTO_DONE		2
1006 #define	CRYPTO_NEXT_DONE	4
1007 #define	CRYPTO_FALLBACK		8
1008 #define	XFRM_GSO_SEGMENT	16
1009 #define	XFRM_GRO		32
1010 #define	XFRM_ESP_NO_TRAILER	64
1011 #define	XFRM_DEV_RESUME		128
1012 #define	XFRM_XMIT		256
1013 
1014 	__u32			status;
1015 #define CRYPTO_SUCCESS				1
1016 #define CRYPTO_GENERIC_ERROR			2
1017 #define CRYPTO_TRANSPORT_AH_AUTH_FAILED		4
1018 #define CRYPTO_TRANSPORT_ESP_AUTH_FAILED	8
1019 #define CRYPTO_TUNNEL_AH_AUTH_FAILED		16
1020 #define CRYPTO_TUNNEL_ESP_AUTH_FAILED		32
1021 #define CRYPTO_INVALID_PACKET_SYNTAX		64
1022 #define CRYPTO_INVALID_PROTOCOL			128
1023 
1024 	__u8			proto;
1025 };
1026 
1027 struct sec_path {
1028 	int			len;
1029 	int			olen;
1030 
1031 	struct xfrm_state	*xvec[XFRM_MAX_DEPTH];
1032 	struct xfrm_offload	ovec[XFRM_MAX_OFFLOAD_DEPTH];
1033 };
1034 
1035 struct sec_path *secpath_set(struct sk_buff *skb);
1036 
1037 static inline void
1038 secpath_reset(struct sk_buff *skb)
1039 {
1040 #ifdef CONFIG_XFRM
1041 	skb_ext_del(skb, SKB_EXT_SEC_PATH);
1042 #endif
1043 }
1044 
1045 static inline int
1046 xfrm_addr_any(const xfrm_address_t *addr, unsigned short family)
1047 {
1048 	switch (family) {
1049 	case AF_INET:
1050 		return addr->a4 == 0;
1051 	case AF_INET6:
1052 		return ipv6_addr_any(&addr->in6);
1053 	}
1054 	return 0;
1055 }
1056 
1057 static inline int
1058 __xfrm4_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
1059 {
1060 	return	(tmpl->saddr.a4 &&
1061 		 tmpl->saddr.a4 != x->props.saddr.a4);
1062 }
1063 
1064 static inline int
1065 __xfrm6_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
1066 {
1067 	return	(!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
1068 		 !ipv6_addr_equal((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
1069 }
1070 
1071 static inline int
1072 xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, unsigned short family)
1073 {
1074 	switch (family) {
1075 	case AF_INET:
1076 		return __xfrm4_state_addr_cmp(tmpl, x);
1077 	case AF_INET6:
1078 		return __xfrm6_state_addr_cmp(tmpl, x);
1079 	}
1080 	return !0;
1081 }
1082 
1083 #ifdef CONFIG_XFRM
1084 int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
1085 			unsigned short family);
1086 
1087 static inline int __xfrm_policy_check2(struct sock *sk, int dir,
1088 				       struct sk_buff *skb,
1089 				       unsigned int family, int reverse)
1090 {
1091 	struct net *net = dev_net(skb->dev);
1092 	int ndir = dir | (reverse ? XFRM_POLICY_MASK + 1 : 0);
1093 
1094 	if (sk && sk->sk_policy[XFRM_POLICY_IN])
1095 		return __xfrm_policy_check(sk, ndir, skb, family);
1096 
1097 	return	(!net->xfrm.policy_count[dir] && !secpath_exists(skb)) ||
1098 		(skb_dst(skb)->flags & DST_NOPOLICY) ||
1099 		__xfrm_policy_check(sk, ndir, skb, family);
1100 }
1101 
1102 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1103 {
1104 	return __xfrm_policy_check2(sk, dir, skb, family, 0);
1105 }
1106 
1107 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1108 {
1109 	return xfrm_policy_check(sk, dir, skb, AF_INET);
1110 }
1111 
1112 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1113 {
1114 	return xfrm_policy_check(sk, dir, skb, AF_INET6);
1115 }
1116 
1117 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1118 					     struct sk_buff *skb)
1119 {
1120 	return __xfrm_policy_check2(sk, dir, skb, AF_INET, 1);
1121 }
1122 
1123 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1124 					     struct sk_buff *skb)
1125 {
1126 	return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
1127 }
1128 
1129 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
1130 			  unsigned int family, int reverse);
1131 
1132 static inline int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
1133 				      unsigned int family)
1134 {
1135 	return __xfrm_decode_session(skb, fl, family, 0);
1136 }
1137 
1138 static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
1139 					      struct flowi *fl,
1140 					      unsigned int family)
1141 {
1142 	return __xfrm_decode_session(skb, fl, family, 1);
1143 }
1144 
1145 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
1146 
1147 static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
1148 {
1149 	struct net *net = dev_net(skb->dev);
1150 
1151 	return	!net->xfrm.policy_count[XFRM_POLICY_OUT] ||
1152 		(skb_dst(skb)->flags & DST_NOXFRM) ||
1153 		__xfrm_route_forward(skb, family);
1154 }
1155 
1156 static inline int xfrm4_route_forward(struct sk_buff *skb)
1157 {
1158 	return xfrm_route_forward(skb, AF_INET);
1159 }
1160 
1161 static inline int xfrm6_route_forward(struct sk_buff *skb)
1162 {
1163 	return xfrm_route_forward(skb, AF_INET6);
1164 }
1165 
1166 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk);
1167 
1168 static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
1169 {
1170 	sk->sk_policy[0] = NULL;
1171 	sk->sk_policy[1] = NULL;
1172 	if (unlikely(osk->sk_policy[0] || osk->sk_policy[1]))
1173 		return __xfrm_sk_clone_policy(sk, osk);
1174 	return 0;
1175 }
1176 
1177 int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
1178 
1179 static inline void xfrm_sk_free_policy(struct sock *sk)
1180 {
1181 	struct xfrm_policy *pol;
1182 
1183 	pol = rcu_dereference_protected(sk->sk_policy[0], 1);
1184 	if (unlikely(pol != NULL)) {
1185 		xfrm_policy_delete(pol, XFRM_POLICY_MAX);
1186 		sk->sk_policy[0] = NULL;
1187 	}
1188 	pol = rcu_dereference_protected(sk->sk_policy[1], 1);
1189 	if (unlikely(pol != NULL)) {
1190 		xfrm_policy_delete(pol, XFRM_POLICY_MAX+1);
1191 		sk->sk_policy[1] = NULL;
1192 	}
1193 }
1194 
1195 #else
1196 
1197 static inline void xfrm_sk_free_policy(struct sock *sk) {}
1198 static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) { return 0; }
1199 static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
1200 static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
1201 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1202 {
1203 	return 1;
1204 }
1205 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1206 {
1207 	return 1;
1208 }
1209 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1210 {
1211 	return 1;
1212 }
1213 static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
1214 					      struct flowi *fl,
1215 					      unsigned int family)
1216 {
1217 	return -ENOSYS;
1218 }
1219 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1220 					     struct sk_buff *skb)
1221 {
1222 	return 1;
1223 }
1224 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1225 					     struct sk_buff *skb)
1226 {
1227 	return 1;
1228 }
1229 #endif
1230 
1231 static __inline__
1232 xfrm_address_t *xfrm_flowi_daddr(const struct flowi *fl, unsigned short family)
1233 {
1234 	switch (family){
1235 	case AF_INET:
1236 		return (xfrm_address_t *)&fl->u.ip4.daddr;
1237 	case AF_INET6:
1238 		return (xfrm_address_t *)&fl->u.ip6.daddr;
1239 	}
1240 	return NULL;
1241 }
1242 
1243 static __inline__
1244 xfrm_address_t *xfrm_flowi_saddr(const struct flowi *fl, unsigned short family)
1245 {
1246 	switch (family){
1247 	case AF_INET:
1248 		return (xfrm_address_t *)&fl->u.ip4.saddr;
1249 	case AF_INET6:
1250 		return (xfrm_address_t *)&fl->u.ip6.saddr;
1251 	}
1252 	return NULL;
1253 }
1254 
1255 static __inline__
1256 void xfrm_flowi_addr_get(const struct flowi *fl,
1257 			 xfrm_address_t *saddr, xfrm_address_t *daddr,
1258 			 unsigned short family)
1259 {
1260 	switch(family) {
1261 	case AF_INET:
1262 		memcpy(&saddr->a4, &fl->u.ip4.saddr, sizeof(saddr->a4));
1263 		memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
1264 		break;
1265 	case AF_INET6:
1266 		saddr->in6 = fl->u.ip6.saddr;
1267 		daddr->in6 = fl->u.ip6.daddr;
1268 		break;
1269 	}
1270 }
1271 
1272 static __inline__ int
1273 __xfrm4_state_addr_check(const struct xfrm_state *x,
1274 			 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
1275 {
1276 	if (daddr->a4 == x->id.daddr.a4 &&
1277 	    (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
1278 		return 1;
1279 	return 0;
1280 }
1281 
1282 static __inline__ int
1283 __xfrm6_state_addr_check(const struct xfrm_state *x,
1284 			 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
1285 {
1286 	if (ipv6_addr_equal((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
1287 	    (ipv6_addr_equal((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr) ||
1288 	     ipv6_addr_any((struct in6_addr *)saddr) ||
1289 	     ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
1290 		return 1;
1291 	return 0;
1292 }
1293 
1294 static __inline__ int
1295 xfrm_state_addr_check(const struct xfrm_state *x,
1296 		      const xfrm_address_t *daddr, const xfrm_address_t *saddr,
1297 		      unsigned short family)
1298 {
1299 	switch (family) {
1300 	case AF_INET:
1301 		return __xfrm4_state_addr_check(x, daddr, saddr);
1302 	case AF_INET6:
1303 		return __xfrm6_state_addr_check(x, daddr, saddr);
1304 	}
1305 	return 0;
1306 }
1307 
1308 static __inline__ int
1309 xfrm_state_addr_flow_check(const struct xfrm_state *x, const struct flowi *fl,
1310 			   unsigned short family)
1311 {
1312 	switch (family) {
1313 	case AF_INET:
1314 		return __xfrm4_state_addr_check(x,
1315 						(const xfrm_address_t *)&fl->u.ip4.daddr,
1316 						(const xfrm_address_t *)&fl->u.ip4.saddr);
1317 	case AF_INET6:
1318 		return __xfrm6_state_addr_check(x,
1319 						(const xfrm_address_t *)&fl->u.ip6.daddr,
1320 						(const xfrm_address_t *)&fl->u.ip6.saddr);
1321 	}
1322 	return 0;
1323 }
1324 
1325 static inline int xfrm_state_kern(const struct xfrm_state *x)
1326 {
1327 	return atomic_read(&x->tunnel_users);
1328 }
1329 
1330 static inline bool xfrm_id_proto_valid(u8 proto)
1331 {
1332 	switch (proto) {
1333 	case IPPROTO_AH:
1334 	case IPPROTO_ESP:
1335 	case IPPROTO_COMP:
1336 #if IS_ENABLED(CONFIG_IPV6)
1337 	case IPPROTO_ROUTING:
1338 	case IPPROTO_DSTOPTS:
1339 #endif
1340 		return true;
1341 	default:
1342 		return false;
1343 	}
1344 }
1345 
1346 /* IPSEC_PROTO_ANY matches only the 3 IPsec protocols; 0 matches everything. */
1347 static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
1348 {
1349 	return (!userproto || proto == userproto ||
1350 		(userproto == IPSEC_PROTO_ANY && (proto == IPPROTO_AH ||
1351 						  proto == IPPROTO_ESP ||
1352 						  proto == IPPROTO_COMP)));
1353 }
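/*
 * For example (illustration only): xfrm_id_proto_match(IPPROTO_ESP,
 * IPSEC_PROTO_ANY) and xfrm_id_proto_match(IPPROTO_ESP, 0) are both true,
 * while xfrm_id_proto_match(IPPROTO_ROUTING, IPSEC_PROTO_ANY) is false,
 * since IPSEC_PROTO_ANY covers only AH, ESP and IPCOMP.
 */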
1354 
1355 /*
1356  * xfrm algorithm information
1357  */
1358 struct xfrm_algo_aead_info {
1359 	char *geniv;
1360 	u16 icv_truncbits;
1361 };
1362 
1363 struct xfrm_algo_auth_info {
1364 	u16 icv_truncbits;
1365 	u16 icv_fullbits;
1366 };
1367 
1368 struct xfrm_algo_encr_info {
1369 	char *geniv;
1370 	u16 blockbits;
1371 	u16 defkeybits;
1372 };
1373 
1374 struct xfrm_algo_comp_info {
1375 	u16 threshold;
1376 };
1377 
1378 struct xfrm_algo_desc {
1379 	char *name;
1380 	char *compat;
1381 	u8 available:1;
1382 	u8 pfkey_supported:1;
1383 	union {
1384 		struct xfrm_algo_aead_info aead;
1385 		struct xfrm_algo_auth_info auth;
1386 		struct xfrm_algo_encr_info encr;
1387 		struct xfrm_algo_comp_info comp;
1388 	} uinfo;
1389 	struct sadb_alg desc;
1390 };
1391 
1392 /* XFRM protocol handlers.  */
1393 struct xfrm4_protocol {
1394 	int (*handler)(struct sk_buff *skb);
1395 	int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
1396 			     int encap_type);
1397 	int (*cb_handler)(struct sk_buff *skb, int err);
1398 	int (*err_handler)(struct sk_buff *skb, u32 info);
1399 
1400 	struct xfrm4_protocol __rcu *next;
1401 	int priority;
1402 };
1403 
1404 struct xfrm6_protocol {
1405 	int (*handler)(struct sk_buff *skb);
1406 	int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
1407 			     int encap_type);
1408 	int (*cb_handler)(struct sk_buff *skb, int err);
1409 	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
1410 			   u8 type, u8 code, int offset, __be32 info);
1411 
1412 	struct xfrm6_protocol __rcu *next;
1413 	int priority;
1414 };
1415 
1416 /* XFRM tunnel handlers.  */
1417 struct xfrm_tunnel {
1418 	int (*handler)(struct sk_buff *skb);
1419 	int (*err_handler)(struct sk_buff *skb, u32 info);
1420 
1421 	struct xfrm_tunnel __rcu *next;
1422 	int priority;
1423 };
1424 
1425 struct xfrm6_tunnel {
1426 	int (*handler)(struct sk_buff *skb);
1427 	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
1428 			   u8 type, u8 code, int offset, __be32 info);
1429 	struct xfrm6_tunnel __rcu *next;
1430 	int priority;
1431 };
1432 
1433 void xfrm_init(void);
1434 void xfrm4_init(void);
1435 int xfrm_state_init(struct net *net);
1436 void xfrm_state_fini(struct net *net);
1437 void xfrm4_state_init(void);
1438 void xfrm4_protocol_init(void);
1439 #ifdef CONFIG_XFRM
1440 int xfrm6_init(void);
1441 void xfrm6_fini(void);
1442 int xfrm6_state_init(void);
1443 void xfrm6_state_fini(void);
1444 int xfrm6_protocol_init(void);
1445 void xfrm6_protocol_fini(void);
1446 #else
1447 static inline int xfrm6_init(void)
1448 {
1449 	return 0;
1450 }
1451 static inline void xfrm6_fini(void)
1452 {
1453 	;
1454 }
1455 #endif
1456 
1457 #ifdef CONFIG_XFRM_STATISTICS
1458 int xfrm_proc_init(struct net *net);
1459 void xfrm_proc_fini(struct net *net);
1460 #endif
1461 
1462 int xfrm_sysctl_init(struct net *net);
1463 #ifdef CONFIG_SYSCTL
1464 void xfrm_sysctl_fini(struct net *net);
1465 #else
1466 static inline void xfrm_sysctl_fini(struct net *net)
1467 {
1468 }
1469 #endif
1470 
1471 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
1472 			  struct xfrm_address_filter *filter);
1473 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
1474 		    int (*func)(struct xfrm_state *, int, void*), void *);
1475 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
1476 struct xfrm_state *xfrm_state_alloc(struct net *net);
1477 void xfrm_state_free(struct xfrm_state *x);
1478 struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
1479 				   const xfrm_address_t *saddr,
1480 				   const struct flowi *fl,
1481 				   struct xfrm_tmpl *tmpl,
1482 				   struct xfrm_policy *pol, int *err,
1483 				   unsigned short family, u32 if_id);
1484 struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
1485 				       xfrm_address_t *daddr,
1486 				       xfrm_address_t *saddr,
1487 				       unsigned short family,
1488 				       u8 mode, u8 proto, u32 reqid);
1489 struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
1490 					      unsigned short family);
1491 int xfrm_state_check_expire(struct xfrm_state *x);
1492 void xfrm_state_insert(struct xfrm_state *x);
1493 int xfrm_state_add(struct xfrm_state *x);
1494 int xfrm_state_update(struct xfrm_state *x);
1495 struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
1496 				     const xfrm_address_t *daddr, __be32 spi,
1497 				     u8 proto, unsigned short family);
1498 struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
1499 					    const xfrm_address_t *daddr,
1500 					    const xfrm_address_t *saddr,
1501 					    u8 proto,
1502 					    unsigned short family);
1503 #ifdef CONFIG_XFRM_SUB_POLICY
1504 void xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1505 		    unsigned short family);
1506 void xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1507 		     unsigned short family);
1508 #else
1509 static inline void xfrm_tmpl_sort(struct xfrm_tmpl **d, struct xfrm_tmpl **s,
1510 				  int n, unsigned short family)
1511 {
1512 }
1513 
1514 static inline void xfrm_state_sort(struct xfrm_state **d, struct xfrm_state **s,
1515 				   int n, unsigned short family)
1516 {
1517 }
1518 #endif
1519 
1520 struct xfrmk_sadinfo {
1521 	u32 sadhcnt; /* current hash bkts */
1522 	u32 sadhmcnt; /* max allowed hash bkts */
1523 	u32 sadcnt; /* current running count */
1524 };
1525 
1526 struct xfrmk_spdinfo {
1527 	u32 incnt;
1528 	u32 outcnt;
1529 	u32 fwdcnt;
1530 	u32 inscnt;
1531 	u32 outscnt;
1532 	u32 fwdscnt;
1533 	u32 spdhcnt;
1534 	u32 spdhmcnt;
1535 };
1536 
1537 struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
1538 int xfrm_state_delete(struct xfrm_state *x);
1539 int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
1540 int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
1541 void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
1542 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
1543 u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
1544 int xfrm_init_replay(struct xfrm_state *x);
1545 u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
1546 int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload);
1547 int xfrm_init_state(struct xfrm_state *x);
1548 int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
1549 int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
1550 int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
1551 			 int (*finish)(struct net *, struct sock *,
1552 				       struct sk_buff *));
1553 int xfrm_trans_queue(struct sk_buff *skb,
1554 		     int (*finish)(struct net *, struct sock *,
1555 				   struct sk_buff *));
1556 int xfrm_output_resume(struct sk_buff *skb, int err);
1557 int xfrm_output(struct sock *sk, struct sk_buff *skb);
1558 
1559 #if IS_ENABLED(CONFIG_NET_PKTGEN)
1560 int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb);
1561 #endif
1562 
1563 void xfrm_local_error(struct sk_buff *skb, int mtu);
1564 int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1565 int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
1566 		    int encap_type);
1567 int xfrm4_transport_finish(struct sk_buff *skb, int async);
1568 int xfrm4_rcv(struct sk_buff *skb);
1569 int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
1570 
1571 static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
1572 {
1573 	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
1574 	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
1575 	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
1576 	return xfrm_input(skb, nexthdr, spi, 0);
1577 }
1578 
1579 int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb);
1580 int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb);
1581 int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
1582 int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol);
1583 int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
1584 int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
1585 void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
1586 int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1587 int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
1588 		  struct ip6_tnl *t);
1589 int xfrm6_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
1590 		    int encap_type);
1591 int xfrm6_transport_finish(struct sk_buff *skb, int async);
1592 int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t);
1593 int xfrm6_rcv(struct sk_buff *skb);
1594 int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
1595 		     xfrm_address_t *saddr, u8 proto);
1596 void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
1597 int xfrm6_protocol_register(struct xfrm6_protocol *handler, unsigned char protocol);
1598 int xfrm6_protocol_deregister(struct xfrm6_protocol *handler, unsigned char protocol);
1599 int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
1600 int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
1601 __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
1602 __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
1603 int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
1604 int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb);
1605 int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
1606 			  u8 **prevhdr);
1607 
1608 #ifdef CONFIG_XFRM
1609 void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu);
1610 int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
1611 int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
1612 int xfrm_user_policy(struct sock *sk, int optname,
1613 		     u8 __user *optval, int optlen);
1614 #else
1615 static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
1616 {
1617 	return -ENOPROTOOPT;
1618 }
1619 #endif
1620 
1621 struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
1622 				    const xfrm_address_t *saddr,
1623 				    const xfrm_address_t *daddr,
1624 				    int family, u32 mark);
1625 
1626 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);
1627 
1628 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
1629 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1630 		     int (*func)(struct xfrm_policy *, int, int, void*),
1631 		     void *);
1632 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
1633 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
1634 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
1635 					  u8 type, int dir,
1636 					  struct xfrm_selector *sel,
1637 					  struct xfrm_sec_ctx *ctx, int delete,
1638 					  int *err);
1639 struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, u8,
1640 				     int dir, u32 id, int delete, int *err);
1641 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
1642 void xfrm_policy_hash_rebuild(struct net *net);
1643 u32 xfrm_get_acqseq(void);
1644 int verify_spi_info(u8 proto, u32 min, u32 max);
1645 int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
1646 struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
1647 				 u8 mode, u32 reqid, u32 if_id, u8 proto,
1648 				 const xfrm_address_t *daddr,
1649 				 const xfrm_address_t *saddr, int create,
1650 				 unsigned short family);
1651 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
1652 
1653 #ifdef CONFIG_XFRM_MIGRATE
1654 int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1655 	       const struct xfrm_migrate *m, int num_bundles,
1656 	       const struct xfrm_kmaddress *k,
1657 	       const struct xfrm_encap_tmpl *encap);
1658 struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net);
1659 struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
1660 				      struct xfrm_migrate *m,
1661 				      struct xfrm_encap_tmpl *encap);
1662 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1663 		 struct xfrm_migrate *m, int num_bundles,
1664 		 struct xfrm_kmaddress *k, struct net *net,
1665 		 struct xfrm_encap_tmpl *encap);
1666 #endif
1667 
1668 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
1669 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid);
1670 int km_report(struct net *net, u8 proto, struct xfrm_selector *sel,
1671 	      xfrm_address_t *addr);
1672 
1673 void xfrm_input_init(void);
1674 int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
1675 
1676 void xfrm_probe_algs(void);
1677 int xfrm_count_pfkey_auth_supported(void);
1678 int xfrm_count_pfkey_enc_supported(void);
1679 struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
1680 struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
1681 struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
1682 struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
1683 struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
1684 struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
1685 struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
1686 struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
1687 struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
1688 					    int probe);
1689 
1690 static inline bool xfrm6_addr_equal(const xfrm_address_t *a,
1691 				    const xfrm_address_t *b)
1692 {
1693 	return ipv6_addr_equal((const struct in6_addr *)a,
1694 			       (const struct in6_addr *)b);
1695 }
1696 
1697 static inline bool xfrm_addr_equal(const xfrm_address_t *a,
1698 				   const xfrm_address_t *b,
1699 				   sa_family_t family)
1700 {
1701 	switch (family) {
1702 	default:
1703 	case AF_INET:
1704 		return ((__force u32)a->a4 ^ (__force u32)b->a4) == 0;
1705 	case AF_INET6:
1706 		return xfrm6_addr_equal(a, b);
1707 	}
1708 }
1709 
1710 static inline int xfrm_policy_id2dir(u32 index)
1711 {
1712 	return index & 7;
1713 }
1714 
1715 #ifdef CONFIG_XFRM
1716 static inline int xfrm_aevent_is_on(struct net *net)
1717 {
1718 	struct sock *nlsk;
1719 	int ret = 0;
1720 
1721 	rcu_read_lock();
1722 	nlsk = rcu_dereference(net->xfrm.nlsk);
1723 	if (nlsk)
1724 		ret = netlink_has_listeners(nlsk, XFRMNLGRP_AEVENTS);
1725 	rcu_read_unlock();
1726 	return ret;
1727 }
1728 
1729 static inline int xfrm_acquire_is_on(struct net *net)
1730 {
1731 	struct sock *nlsk;
1732 	int ret = 0;
1733 
1734 	rcu_read_lock();
1735 	nlsk = rcu_dereference(net->xfrm.nlsk);
1736 	if (nlsk)
1737 		ret = netlink_has_listeners(nlsk, XFRMNLGRP_ACQUIRE);
1738 	rcu_read_unlock();
1739 
1740 	return ret;
1741 }
1742 #endif
1743 
1744 static inline unsigned int aead_len(struct xfrm_algo_aead *alg)
1745 {
1746 	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1747 }
1748 
1749 static inline unsigned int xfrm_alg_len(const struct xfrm_algo *alg)
1750 {
1751 	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1752 }
1753 
1754 static inline unsigned int xfrm_alg_auth_len(const struct xfrm_algo_auth *alg)
1755 {
1756 	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1757 }
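/*
 * The helpers above size a variable-length algorithm blob: alg_key_len is in
 * bits and is rounded up to whole bytes. For example (illustration only), a
 * 160-bit HMAC-SHA1 key gives sizeof(struct xfrm_algo_auth) + (160 + 7) / 8,
 * i.e. the struct plus 20 bytes of key material.
 */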
1758 
1759 static inline unsigned int xfrm_replay_state_esn_len(struct xfrm_replay_state_esn *replay_esn)
1760 {
1761 	return sizeof(*replay_esn) + replay_esn->bmp_len * sizeof(__u32);
1762 }
1763 
1764 #ifdef CONFIG_XFRM_MIGRATE
1765 static inline int xfrm_replay_clone(struct xfrm_state *x,
1766 				     struct xfrm_state *orig)
1767 {
1768 	x->replay_esn = kzalloc(xfrm_replay_state_esn_len(orig->replay_esn),
1769 				GFP_KERNEL);
1770 	if (!x->replay_esn)
1771 		return -ENOMEM;
1772 
1773 	x->replay_esn->bmp_len = orig->replay_esn->bmp_len;
1774 	x->replay_esn->replay_window = orig->replay_esn->replay_window;
1775 
1776 	x->preplay_esn = kmemdup(x->replay_esn,
1777 				 xfrm_replay_state_esn_len(x->replay_esn),
1778 				 GFP_KERNEL);
1779 	if (!x->preplay_esn) {
1780 		kfree(x->replay_esn);
1781 		return -ENOMEM;
1782 	}
1783 
1784 	return 0;
1785 }
1786 
1787 static inline struct xfrm_algo_aead *xfrm_algo_aead_clone(struct xfrm_algo_aead *orig)
1788 {
1789 	return kmemdup(orig, aead_len(orig), GFP_KERNEL);
1790 }
1791 
1792 
1793 static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
1794 {
1795 	return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
1796 }
1797 
1798 static inline struct xfrm_algo_auth *xfrm_algo_auth_clone(struct xfrm_algo_auth *orig)
1799 {
1800 	return kmemdup(orig, xfrm_alg_auth_len(orig), GFP_KERNEL);
1801 }
1802 
1803 static inline void xfrm_states_put(struct xfrm_state **states, int n)
1804 {
1805 	int i;
1806 	for (i = 0; i < n; i++)
1807 		xfrm_state_put(*(states + i));
1808 }
1809 
1810 static inline void xfrm_states_delete(struct xfrm_state **states, int n)
1811 {
1812 	int i;
1813 	for (i = 0; i < n; i++)
1814 		xfrm_state_delete(*(states + i));
1815 }
1816 #endif
1817 
1818 #ifdef CONFIG_XFRM
1819 static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
1820 {
1821 	struct sec_path *sp = skb_sec_path(skb);
1822 
1823 	return sp->xvec[sp->len - 1];
1824 }
1825 #endif
1826 
1827 static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
1828 {
1829 #ifdef CONFIG_XFRM
1830 	struct sec_path *sp = skb_sec_path(skb);
1831 
1832 	if (!sp || !sp->olen || sp->len != sp->olen)
1833 		return NULL;
1834 
1835 	return &sp->ovec[sp->olen - 1];
1836 #else
1837 	return NULL;
1838 #endif
1839 }
1840 
1841 void __init xfrm_dev_init(void);
1842 
1843 #ifdef CONFIG_XFRM_OFFLOAD
1844 void xfrm_dev_resume(struct sk_buff *skb);
1845 void xfrm_dev_backlog(struct softnet_data *sd);
1846 struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
1847 int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
1848 		       struct xfrm_user_offload *xuo);
1849 bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
1850 
1851 static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
1852 {
1853 	struct xfrm_state_offload *xso = &x->xso;
1854 
1855 	if (xso->dev && xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn)
1856 		xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn(x);
1857 }
1858 
1859 static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
1860 {
1861 	struct xfrm_state *x = dst->xfrm;
1862 	struct xfrm_dst *xdst;
1863 
1864 	if (!x || !x->type_offload)
1865 		return false;
1866 
1867 	xdst = (struct xfrm_dst *) dst;
1868 	if (!x->xso.offload_handle && !xdst->child->xfrm)
1869 		return true;
1870 	if (x->xso.offload_handle && (x->xso.dev == xfrm_dst_path(dst)->dev) &&
1871 	    !xdst->child->xfrm)
1872 		return true;
1873 
1874 	return false;
1875 }
1876 
1877 static inline void xfrm_dev_state_delete(struct xfrm_state *x)
1878 {
1879 	struct xfrm_state_offload *xso = &x->xso;
1880 
1881 	if (xso->dev)
1882 		xso->dev->xfrmdev_ops->xdo_dev_state_delete(x);
1883 }
1884 
1885 static inline void xfrm_dev_state_free(struct xfrm_state *x)
1886 {
1887 	struct xfrm_state_offload *xso = &x->xso;
1888 	struct net_device *dev = xso->dev;
1889 
1890 	if (dev && dev->xfrmdev_ops) {
1891 		if (dev->xfrmdev_ops->xdo_dev_state_free)
1892 			dev->xfrmdev_ops->xdo_dev_state_free(x);
1893 		xso->dev = NULL;
1894 		dev_put(dev);
1895 	}
1896 }
1897 #else
1898 static inline void xfrm_dev_resume(struct sk_buff *skb)
1899 {
1900 }
1901 
1902 static inline void xfrm_dev_backlog(struct softnet_data *sd)
1903 {
1904 }
1905 
1906 static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
1907 {
1908 	return skb;
1909 }
1910 
1911 static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo)
1912 {
1913 	return 0;
1914 }
1915 
1916 static inline void xfrm_dev_state_delete(struct xfrm_state *x)
1917 {
1918 }
1919 
1920 static inline void xfrm_dev_state_free(struct xfrm_state *x)
1921 {
1922 }
1923 
1924 static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
1925 {
1926 	return false;
1927 }
1928 
1929 static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
1930 {
1931 }
1932 
1933 static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
1934 {
1935 	return false;
1936 }
1937 #endif
1938 
1939 static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
1940 {
1941 	if (attrs[XFRMA_MARK])
1942 		memcpy(m, nla_data(attrs[XFRMA_MARK]), sizeof(struct xfrm_mark));
1943 	else
1944 		m->v = m->m = 0;
1945 
1946 	return m->v & m->m;
1947 }
1948 
1949 static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
1950 {
1951 	int ret = 0;
1952 
1953 	if (m->m | m->v)
1954 		ret = nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
1955 	return ret;
1956 }
1957 
1958 static inline __u32 xfrm_smark_get(__u32 mark, struct xfrm_state *x)
1959 {
1960 	struct xfrm_mark *m = &x->props.smark;
1961 
1962 	return (m->v & m->m) | (mark & ~m->m);
1963 }
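/*
 * A quick worked example of the combination above (values illustrative):
 * with x->props.smark = { .v = 0x1200, .m = 0xff00 } and an input mark of
 * 0x0034, the result is (0x1200 & 0xff00) | (0x0034 & ~0xff00) == 0x1234;
 * the state's mark overrides only the bits covered by its mask.
 */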
1964 
1965 static inline int xfrm_if_id_put(struct sk_buff *skb, __u32 if_id)
1966 {
1967 	int ret = 0;
1968 
1969 	if (if_id)
1970 		ret = nla_put_u32(skb, XFRMA_IF_ID, if_id);
1971 	return ret;
1972 }
1973 
1974 static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
1975 				    unsigned int family)
1976 {
1977 	bool tunnel = false;
1978 
1979 	switch(family) {
1980 	case AF_INET:
1981 		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
1982 			tunnel = true;
1983 		break;
1984 	case AF_INET6:
1985 		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
1986 			tunnel = true;
1987 		break;
1988 	}
1989 	if (tunnel && !(x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL))
1990 		return -EINVAL;
1991 
1992 	return 0;
1993 }
1994 
1995 #if IS_ENABLED(CONFIG_IPV6)
1996 static inline bool xfrm6_local_dontfrag(const struct sock *sk)
1997 {
1998 	int proto;
1999 
2000 	if (!sk || sk->sk_family != AF_INET6)
2001 		return false;
2002 
2003 	proto = sk->sk_protocol;
2004 	if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
2005 		return inet6_sk(sk)->dontfrag;
2006 
2007 	return false;
2008 }
2009 #endif
2010 #endif	/* _NET_XFRM_H */
2011