xref: /linux/include/net/xfrm.h (revision c411ed854584a71b0e86ac3019b60e4789d88086)
1 #ifndef _NET_XFRM_H
2 #define _NET_XFRM_H
3 
4 #include <linux/compiler.h>
5 #include <linux/xfrm.h>
6 #include <linux/spinlock.h>
7 #include <linux/list.h>
8 #include <linux/skbuff.h>
9 #include <linux/socket.h>
10 #include <linux/pfkeyv2.h>
11 #include <linux/ipsec.h>
12 #include <linux/in6.h>
13 #include <linux/mutex.h>
14 #include <linux/audit.h>
15 #include <linux/slab.h>
16 #include <linux/refcount.h>
17 
18 #include <net/sock.h>
19 #include <net/dst.h>
20 #include <net/ip.h>
21 #include <net/route.h>
22 #include <net/ipv6.h>
23 #include <net/ip6_fib.h>
24 #include <net/flow.h>
25 
26 #include <linux/interrupt.h>
27 
28 #ifdef CONFIG_XFRM_STATISTICS
29 #include <net/snmp.h>
30 #endif
31 
32 #define XFRM_PROTO_ESP		50
33 #define XFRM_PROTO_AH		51
34 #define XFRM_PROTO_COMP		108
35 #define XFRM_PROTO_IPIP		4
36 #define XFRM_PROTO_IPV6		41
37 #define XFRM_PROTO_ROUTING	IPPROTO_ROUTING
38 #define XFRM_PROTO_DSTOPTS	IPPROTO_DSTOPTS
39 
40 #define XFRM_ALIGN4(len)	(((len) + 3) & ~3)
41 #define XFRM_ALIGN8(len)	(((len) + 7) & ~7)
42 #define MODULE_ALIAS_XFRM_MODE(family, encap) \
43 	MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
44 #define MODULE_ALIAS_XFRM_TYPE(family, proto) \
45 	MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto))
46 #define MODULE_ALIAS_XFRM_OFFLOAD_TYPE(family, proto) \
47 	MODULE_ALIAS("xfrm-offload-" __stringify(family) "-" __stringify(proto))
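/*
 * XFRM_ALIGN4()/XFRM_ALIGN8() round a length up to the next 4- or 8-byte
 * boundary, e.g. XFRM_ALIGN4(5) == 8 and XFRM_ALIGN8(21) == 24; already
 * aligned values are returned unchanged.  The MODULE_ALIAS_XFRM_* macros
 * give type/mode/offload modules "xfrm-type-<family>-<proto>" style
 * aliases so they can be auto-loaded on demand (illustrative summary; see
 * the request_module() callers for the exact format).
 */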
48 
49 #ifdef CONFIG_XFRM_STATISTICS
50 #define XFRM_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
51 #else
52 #define XFRM_INC_STATS(net, field)	((void)(net))
53 #endif
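/*
 * Example (sketch): the input/output paths bump the per-netns MIB counters
 * from linux/snmp.h on errors, e.g.
 *
 *	XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
 *
 * With CONFIG_XFRM_STATISTICS disabled this degenerates to a no-op that
 * only evaluates 'net'.
 */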
54 
55 
56 /* Organization of SPD aka "XFRM rules"
57    ------------------------------------
58 
59    Basic objects:
60    - policy rule, struct xfrm_policy (=SPD entry)
61    - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
62    - instance of a transformer, struct xfrm_state (=SA)
63    - template to clone xfrm_state, struct xfrm_tmpl
64 
65    The SPD is a plain linear list of xfrm_policy rules, ordered by priority.
66    (To be compatible with existing pfkeyv2 implementations,
67    many rules with a priority of 0x7fffffff are allowed to exist and
68    such rules are ordered in an unpredictable way, thanks to bsd folks.)
69
70    Lookup is a plain linear search until the first match with the selector.
71
72    If "action" is "block", then we prohibit the flow, otherwise:
73    if "xfrm_nr" is zero, the flow passes untransformed. Otherwise, the
74    policy entry has a list of up to XFRM_MAX_DEPTH transformations,
75    described by xfrm_tmpl templates. Each template is resolved
76    to a complete xfrm_state (see below) and we pack the bundle of
77    transformations into a dst_entry returned to the requestor.
78 
79    dst -. xfrm  .-> xfrm_state #1
80     |---. child .-> dst -. xfrm .-> xfrm_state #2
81                      |---. child .-> dst -. xfrm .-> xfrm_state #3
82                                       |---. child .-> NULL
83 
84    Bundles are cached at the xfrm_policy struct (field ->bundles).
85 
86 
87    Resolution of xfrm_tmpl
88    -----------------------
89    Template contains:
90    1. ->mode		Mode: transport or tunnel
91    2. ->id.proto	Protocol: AH/ESP/IPCOMP
92    3. ->id.daddr	Remote tunnel endpoint, ignored for transport mode.
93       Q: allow to resolve security gateway?
94    4. ->id.spi          If not zero, static SPI.
95    5. ->saddr		Local tunnel endpoint, ignored for transport mode.
96    6. ->algos		List of allowed algos. Plain bitmask now.
97       Q: ealgos, aalgos, calgos. What a mess...
98    7. ->share		Sharing mode.
99       Q: how to implement private sharing mode? To add struct sock* to
100       flow id?
101 
102    Given this template we search through the SAD for entries
103    with the appropriate mode/proto/algo, permitted by the selector.
104    If no appropriate entry is found, one is requested from the key manager.
105 
106    PROBLEMS:
107    Q: How to find all the bundles referring to a physical path for
108       PMTU discovery? It seems dst would have to contain a list of all
109       parents... and we would enter an infinite locking hierarchy disaster.
110       No! It is easier: we will not search for them, we let them find us.
111       We add a genid to each dst plus a pointer to the genid of the raw IP
112       route; PMTU discovery updates the pmtu on the raw IP route and bumps
113       its genid.  dst_check() sees this at the top level and triggers a
114       resync of the metrics. Plus, it is done via sk->sk_dst_cache. Solved.
115  */
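/*
 * A rough sketch of the resolution step described above (illustrative
 * only: error handling, locking and the transport-mode fallback to the
 * flow's own addresses are omitted; the real logic lives in
 * net/xfrm/xfrm_policy.c and net/xfrm/xfrm_state.c):
 *
 *	static int resolve_templates_sketch(struct xfrm_policy *pol,
 *					    const struct flowi *fl,
 *					    unsigned short family,
 *					    struct xfrm_state **xfrm)
 *	{
 *		int i, err;
 *
 *		for (i = 0; i < pol->xfrm_nr; i++) {
 *			struct xfrm_tmpl *tmpl = &pol->xfrm_vec[i];
 *
 *			xfrm[i] = xfrm_state_find(&tmpl->id.daddr, &tmpl->saddr,
 *						  fl, tmpl, pol, &err, family);
 *			if (!xfrm[i])
 *				return err;
 *		}
 *		return 0;
 *	}
 */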
116 
117 struct xfrm_state_walk {
118 	struct list_head	all;
119 	u8			state;
120 	u8			dying;
121 	u8			proto;
122 	u32			seq;
123 	struct xfrm_address_filter *filter;
124 };
125 
126 struct xfrm_state_offload {
127 	struct net_device	*dev;
128 	unsigned long		offload_handle;
129 	unsigned int		num_exthdrs;
130 	u8			flags;
131 };
132 
133 /* Full description of state of transformer. */
134 struct xfrm_state {
135 	possible_net_t		xs_net;
136 	union {
137 		struct hlist_node	gclist;
138 		struct hlist_node	bydst;
139 	};
140 	struct hlist_node	bysrc;
141 	struct hlist_node	byspi;
142 
143 	refcount_t		refcnt;
144 	spinlock_t		lock;
145 
146 	struct xfrm_id		id;
147 	struct xfrm_selector	sel;
148 	struct xfrm_mark	mark;
149 	u32			tfcpad;
150 
151 	u32			genid;
152 
153 	/* Key manager bits */
154 	struct xfrm_state_walk	km;
155 
156 	/* Parameters of this state. */
157 	struct {
158 		u32		reqid;
159 		u8		mode;
160 		u8		replay_window;
161 		u8		aalgo, ealgo, calgo;
162 		u8		flags;
163 		u16		family;
164 		xfrm_address_t	saddr;
165 		int		header_len;
166 		int		trailer_len;
167 		u32		extra_flags;
168 		u32		output_mark;
169 	} props;
170 
171 	struct xfrm_lifetime_cfg lft;
172 
173 	/* Data for transformer */
174 	struct xfrm_algo_auth	*aalg;
175 	struct xfrm_algo	*ealg;
176 	struct xfrm_algo	*calg;
177 	struct xfrm_algo_aead	*aead;
178 	const char		*geniv;
179 
180 	/* Data for encapsulator */
181 	struct xfrm_encap_tmpl	*encap;
182 
183 	/* Data for care-of address */
184 	xfrm_address_t	*coaddr;
185 
186 	/* IPComp needs an IPIP tunnel for handling uncompressed packets */
187 	struct xfrm_state	*tunnel;
188 
189 	/* If a tunnel, number of users + 1 */
190 	atomic_t		tunnel_users;
191 
192 	/* State for replay detection */
193 	struct xfrm_replay_state replay;
194 	struct xfrm_replay_state_esn *replay_esn;
195 
196 	/* Replay detection state at the time we sent the last notification */
197 	struct xfrm_replay_state preplay;
198 	struct xfrm_replay_state_esn *preplay_esn;
199 
200 	/* The functions for replay detection. */
201 	const struct xfrm_replay *repl;
202 
203	/* Internal flag that currently only holds state for a delayed
204	 * aevent.
205	 */
206 	u32			xflags;
207 
208 	/* Replay detection notification settings */
209 	u32			replay_maxage;
210 	u32			replay_maxdiff;
211 
212 	/* Replay detection notification timer */
213 	struct timer_list	rtimer;
214 
215 	/* Statistics */
216 	struct xfrm_stats	stats;
217 
218 	struct xfrm_lifetime_cur curlft;
219 	struct tasklet_hrtimer	mtimer;
220 
221 	struct xfrm_state_offload xso;
222 
223 	/* used to fix curlft->add_time when changing date */
224 	long		saved_tmo;
225 
226 	/* Last used time */
227 	unsigned long		lastused;
228 
229 	struct page_frag xfrag;
230 
231 	/* Reference to data common to all the instances of this
232 	 * transformer. */
233 	const struct xfrm_type	*type;
234 	struct xfrm_mode	*inner_mode;
235 	struct xfrm_mode	*inner_mode_iaf;
236 	struct xfrm_mode	*outer_mode;
237 
238 	const struct xfrm_type_offload	*type_offload;
239 
240 	/* Security context */
241 	struct xfrm_sec_ctx	*security;
242 
243 	/* Private data of this transformer, format is opaque,
244 	 * interpreted by xfrm_type methods. */
245 	void			*data;
246 };
247 
248 static inline struct net *xs_net(struct xfrm_state *x)
249 {
250 	return read_pnet(&x->xs_net);
251 }
252 
253 /* xflags - make enum if more show up */
254 #define XFRM_TIME_DEFER	1
255 #define XFRM_SOFT_EXPIRE 2
256 
257 enum {
258 	XFRM_STATE_VOID,
259 	XFRM_STATE_ACQ,
260 	XFRM_STATE_VALID,
261 	XFRM_STATE_ERROR,
262 	XFRM_STATE_EXPIRED,
263 	XFRM_STATE_DEAD
264 };
265 
266 /* callback structure passed from either netlink or pfkey */
267 struct km_event {
268 	union {
269 		u32 hard;
270 		u32 proto;
271 		u32 byid;
272 		u32 aevent;
273 		u32 type;
274 	} data;
275 
276 	u32	seq;
277 	u32	portid;
278 	u32	event;
279 	struct net *net;
280 };
281 
282 struct xfrm_replay {
283 	void	(*advance)(struct xfrm_state *x, __be32 net_seq);
284 	int	(*check)(struct xfrm_state *x,
285 			 struct sk_buff *skb,
286 			 __be32 net_seq);
287 	int	(*recheck)(struct xfrm_state *x,
288 			   struct sk_buff *skb,
289 			   __be32 net_seq);
290 	void	(*notify)(struct xfrm_state *x, int event);
291 	int	(*overflow)(struct xfrm_state *x, struct sk_buff *skb);
292 };
293 
294 struct net_device;
295 struct xfrm_type;
296 struct xfrm_dst;
297 struct xfrm_policy_afinfo {
298 	struct dst_ops		*dst_ops;
299 	struct dst_entry	*(*dst_lookup)(struct net *net,
300 					       int tos, int oif,
301 					       const xfrm_address_t *saddr,
302 					       const xfrm_address_t *daddr,
303 					       u32 mark);
304 	int			(*get_saddr)(struct net *net, int oif,
305 					     xfrm_address_t *saddr,
306 					     xfrm_address_t *daddr,
307 					     u32 mark);
308 	void			(*decode_session)(struct sk_buff *skb,
309 						  struct flowi *fl,
310 						  int reverse);
311 	int			(*get_tos)(const struct flowi *fl);
312 	int			(*init_path)(struct xfrm_dst *path,
313 					     struct dst_entry *dst,
314 					     int nfheader_len);
315 	int			(*fill_dst)(struct xfrm_dst *xdst,
316 					    struct net_device *dev,
317 					    const struct flowi *fl);
318 	struct dst_entry	*(*blackhole_route)(struct net *net, struct dst_entry *orig);
319 };
320 
321 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family);
322 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo);
323 void km_policy_notify(struct xfrm_policy *xp, int dir,
324 		      const struct km_event *c);
325 void xfrm_policy_cache_flush(void);
326 void km_state_notify(struct xfrm_state *x, const struct km_event *c);
327 
328 struct xfrm_tmpl;
329 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t,
330 	     struct xfrm_policy *pol);
331 void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
332 int __xfrm_state_delete(struct xfrm_state *x);
333 
334 struct xfrm_state_afinfo {
335 	unsigned int			family;
336 	unsigned int			proto;
337 	__be16				eth_proto;
338 	struct module			*owner;
339 	const struct xfrm_type		*type_map[IPPROTO_MAX];
340 	const struct xfrm_type_offload	*type_offload_map[IPPROTO_MAX];
341 	struct xfrm_mode		*mode_map[XFRM_MODE_MAX];
342 
343 	int			(*init_flags)(struct xfrm_state *x);
344 	void			(*init_tempsel)(struct xfrm_selector *sel,
345 						const struct flowi *fl);
346 	void			(*init_temprop)(struct xfrm_state *x,
347 						const struct xfrm_tmpl *tmpl,
348 						const xfrm_address_t *daddr,
349 						const xfrm_address_t *saddr);
350 	int			(*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
351 	int			(*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
352 	int			(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
353 	int			(*output_finish)(struct sock *sk, struct sk_buff *skb);
354 	int			(*extract_input)(struct xfrm_state *x,
355 						 struct sk_buff *skb);
356 	int			(*extract_output)(struct xfrm_state *x,
357 						  struct sk_buff *skb);
358 	int			(*transport_finish)(struct sk_buff *skb,
359 						    int async);
360 	void			(*local_error)(struct sk_buff *skb, u32 mtu);
361 };
362 
363 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
364 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
365 struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
366 struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family);
367 
368 struct xfrm_input_afinfo {
369 	unsigned int		family;
370 	int			(*callback)(struct sk_buff *skb, u8 protocol,
371 					    int err);
372 };
373 
374 int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo);
375 int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo);
376 
377 void xfrm_state_delete_tunnel(struct xfrm_state *x);
378 
379 struct xfrm_type {
380 	char			*description;
381 	struct module		*owner;
382 	u8			proto;
383 	u8			flags;
384 #define XFRM_TYPE_NON_FRAGMENT	1
385 #define XFRM_TYPE_REPLAY_PROT	2
386 #define XFRM_TYPE_LOCAL_COADDR	4
387 #define XFRM_TYPE_REMOTE_COADDR	8
388 
389 	int			(*init_state)(struct xfrm_state *x);
390 	void			(*destructor)(struct xfrm_state *);
391 	int			(*input)(struct xfrm_state *, struct sk_buff *skb);
392 	int			(*output)(struct xfrm_state *, struct sk_buff *pskb);
393 	int			(*reject)(struct xfrm_state *, struct sk_buff *,
394 					  const struct flowi *);
395 	int			(*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **);
396 	/* Estimate maximal size of result of transformation of a dgram */
397 	u32			(*get_mtu)(struct xfrm_state *, int size);
398 };
399 
400 int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
401 int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
402 
403 struct xfrm_type_offload {
404 	char		*description;
405 	struct module	*owner;
406 	u8		proto;
407 	void		(*encap)(struct xfrm_state *, struct sk_buff *pskb);
408 	int		(*input_tail)(struct xfrm_state *x, struct sk_buff *skb);
409 	int		(*xmit)(struct xfrm_state *, struct sk_buff *pskb, netdev_features_t features);
410 };
411 
412 int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family);
413 int xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family);
414 
415 struct xfrm_mode {
416 	/*
417 	 * Remove encapsulation header.
418 	 *
419 	 * The IP header will be moved over the top of the encapsulation
420 	 * header.
421 	 *
422 	 * On entry, the transport header shall point to where the IP header
423 	 * should be and the network header shall be set to where the IP
424 	 * header currently is.  skb->data shall point to the start of the
425 	 * payload.
426 	 */
427 	int (*input2)(struct xfrm_state *x, struct sk_buff *skb);
428 
429 	/*
430 	 * This is the actual input entry point.
431 	 *
432	 * For transport mode and equivalent this is identical to
433	 * input2 (which does not need to be set).  Tunnel mode
434	 * and equivalent instead set this to a tunnel encapsulation function
435	 * such as xfrm4_prepare_input, which in turn calls input2.
436 	 */
437 	int (*input)(struct xfrm_state *x, struct sk_buff *skb);
438 
439 	/*
440 	 * Add encapsulation header.
441 	 *
442 	 * On exit, the transport header will be set to the start of the
443 	 * encapsulation header to be filled in by x->type->output and
444 	 * the mac header will be set to the nextheader (protocol for
445 	 * IPv4) field of the extension header directly preceding the
446 	 * encapsulation header, or in its absence, that of the top IP
447 	 * header.  The value of the network header will always point
448 	 * to the top IP header while skb->data will point to the payload.
449 	 */
450	int (*output2)(struct xfrm_state *x, struct sk_buff *skb);
451 
452 	/*
453 	 * This is the actual output entry point.
454 	 *
455	 * For transport mode and equivalent this is identical to
456	 * output2 (which does not need to be set).  Tunnel mode
457	 * and equivalent instead set this to a tunnel encapsulation function
458	 * (xfrm4_prepare_output or xfrm6_prepare_output) which in turn
459	 * calls output2.
460 	 */
461 	int (*output)(struct xfrm_state *x, struct sk_buff *skb);
462 
463 	/*
464 	 * Adjust pointers into the packet and do GSO segmentation.
465 	 */
466 	struct sk_buff *(*gso_segment)(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features);
467 
468 	/*
469 	 * Adjust pointers into the packet when IPsec is done at layer2.
470 	 */
471 	void (*xmit)(struct xfrm_state *x, struct sk_buff *skb);
472 
473 	struct xfrm_state_afinfo *afinfo;
474 	struct module *owner;
475 	unsigned int encap;
476 	int flags;
477 };
478 
479 /* Flags for xfrm_mode. */
480 enum {
481 	XFRM_MODE_FLAG_TUNNEL = 1,
482 };
483 
484 int xfrm_register_mode(struct xfrm_mode *mode, int family);
485 int xfrm_unregister_mode(struct xfrm_mode *mode, int family);
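/*
 * A minimal sketch of how a transport-mode implementation wires these hooks
 * up (loosely modelled on net/ipv4/xfrm4_mode_transport.c; the example_*
 * names and callback bodies are illustrative, not the real module):
 *
 *	static struct xfrm_mode example_transport_mode = {
 *		.input	= example_transport_input,
 *		.output	= example_transport_output,
 *		.owner	= THIS_MODULE,
 *		.encap	= XFRM_MODE_TRANSPORT,
 *	};
 *
 *	static int __init example_transport_init(void)
 *	{
 *		return xfrm_register_mode(&example_transport_mode, AF_INET);
 *	}
 *
 * Transport mode leaves input2/output2 unset; tunnel modes additionally set
 * XFRM_MODE_FLAG_TUNNEL in .flags and provide input2/output2.
 */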
486 
487 static inline int xfrm_af2proto(unsigned int family)
488 {
489 	switch(family) {
490 	case AF_INET:
491 		return IPPROTO_IPIP;
492 	case AF_INET6:
493 		return IPPROTO_IPV6;
494 	default:
495 		return 0;
496 	}
497 }
498 
499 static inline struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto)
500 {
501 	if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
502 	    (ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6))
503 		return x->inner_mode;
504 	else
505 		return x->inner_mode_iaf;
506 }
507 
508 struct xfrm_tmpl {
509 /* id in template is interpreted as:
510  * daddr - destination of tunnel, may be zero for transport mode.
511  * spi   - zero to acquire spi. Not zero if spi is static, then
512  *	   daddr must be fixed too.
513  * proto - AH/ESP/IPCOMP
514  */
515 	struct xfrm_id		id;
516 
517 /* Source address of tunnel. Ignored, if it is not a tunnel. */
518 	xfrm_address_t		saddr;
519 
520 	unsigned short		encap_family;
521 
522 	u32			reqid;
523 
524 /* Mode: transport, tunnel etc. */
525 	u8			mode;
526 
527 /* Sharing mode: unique, this session only, this user only etc. */
528 	u8			share;
529 
530 /* May skip this transformation if no SA is found */
531 	u8			optional;
532 
533 /* Skip aalgos/ealgos/calgos checks. */
534 	u8			allalgs;
535 
536 /* Bit mask of algos allowed for acquisition */
537 	u32			aalgos;
538 	u32			ealgos;
539 	u32			calgos;
540 };
541 
542 #define XFRM_MAX_DEPTH		6
543 #define XFRM_MAX_OFFLOAD_DEPTH	1
544 
545 struct xfrm_policy_walk_entry {
546 	struct list_head	all;
547 	u8			dead;
548 };
549 
550 struct xfrm_policy_walk {
551 	struct xfrm_policy_walk_entry walk;
552 	u8 type;
553 	u32 seq;
554 };
555 
556 struct xfrm_policy_queue {
557 	struct sk_buff_head	hold_queue;
558 	struct timer_list	hold_timer;
559 	unsigned long		timeout;
560 };
561 
562 struct xfrm_policy {
563 	possible_net_t		xp_net;
564 	struct hlist_node	bydst;
565 	struct hlist_node	byidx;
566 
567	/* This lock protects all elements except for the list entry. */
568 	rwlock_t		lock;
569 	refcount_t		refcnt;
570 	struct timer_list	timer;
571 
572 	atomic_t		genid;
573 	u32			priority;
574 	u32			index;
575 	struct xfrm_mark	mark;
576 	struct xfrm_selector	selector;
577 	struct xfrm_lifetime_cfg lft;
578 	struct xfrm_lifetime_cur curlft;
579 	struct xfrm_policy_walk_entry walk;
580 	struct xfrm_policy_queue polq;
581 	u8			type;
582 	u8			action;
583 	u8			flags;
584 	u8			xfrm_nr;
585 	u16			family;
586 	struct xfrm_sec_ctx	*security;
587 	struct xfrm_tmpl       	xfrm_vec[XFRM_MAX_DEPTH];
588 	struct rcu_head		rcu;
589 };
590 
591 static inline struct net *xp_net(const struct xfrm_policy *xp)
592 {
593 	return read_pnet(&xp->xp_net);
594 }
595 
596 struct xfrm_kmaddress {
597 	xfrm_address_t          local;
598 	xfrm_address_t          remote;
599 	u32			reserved;
600 	u16			family;
601 };
602 
603 struct xfrm_migrate {
604 	xfrm_address_t		old_daddr;
605 	xfrm_address_t		old_saddr;
606 	xfrm_address_t		new_daddr;
607 	xfrm_address_t		new_saddr;
608 	u8			proto;
609 	u8			mode;
610 	u16			reserved;
611 	u32			reqid;
612 	u16			old_family;
613 	u16			new_family;
614 };
615 
616 #define XFRM_KM_TIMEOUT                30
617 /* what happened */
618 #define XFRM_REPLAY_UPDATE	XFRM_AE_CR
619 #define XFRM_REPLAY_TIMEOUT	XFRM_AE_CE
620 
621 /* default aevent timeout in units of 100ms */
622 #define XFRM_AE_ETIME			10
623 /* Async Event timer multiplier */
624 #define XFRM_AE_ETH_M			10
625 /* default seq threshold size */
626 #define XFRM_AE_SEQT_SIZE		2
627 
628 struct xfrm_mgr {
629 	struct list_head	list;
630 	int			(*notify)(struct xfrm_state *x, const struct km_event *c);
631 	int			(*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp);
632 	struct xfrm_policy	*(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
633 	int			(*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
634 	int			(*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c);
635 	int			(*report)(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
636 	int			(*migrate)(const struct xfrm_selector *sel,
637 					   u8 dir, u8 type,
638 					   const struct xfrm_migrate *m,
639 					   int num_bundles,
640 					   const struct xfrm_kmaddress *k,
641 					   const struct xfrm_encap_tmpl *encap);
642 	bool			(*is_alive)(const struct km_event *c);
643 };
644 
645 int xfrm_register_km(struct xfrm_mgr *km);
646 int xfrm_unregister_km(struct xfrm_mgr *km);
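/*
 * Key managers (af_key's PF_KEY sockets and xfrm_user's netlink interface
 * are the in-tree users) plug in by filling a struct xfrm_mgr and
 * registering it; a hedged sketch with illustrative callback names:
 *
 *	static struct xfrm_mgr example_mgr = {
 *		.notify		= example_send_notify,
 *		.acquire	= example_send_acquire,
 *		.compile_policy	= example_compile_policy,
 *		.new_mapping	= example_new_mapping,
 *		.notify_policy	= example_send_policy_notify,
 *		.is_alive	= example_is_alive,
 *	};
 *
 *	err = xfrm_register_km(&example_mgr);
 *
 * The callbacks are then invoked through km_query(), km_state_notify(),
 * km_new_mapping() and friends declared in this header.
 */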
647 
648 struct xfrm_tunnel_skb_cb {
649 	union {
650 		struct inet_skb_parm h4;
651 		struct inet6_skb_parm h6;
652 	} header;
653 
654 	union {
655 		struct ip_tunnel *ip4;
656 		struct ip6_tnl *ip6;
657 	} tunnel;
658 };
659 
660 #define XFRM_TUNNEL_SKB_CB(__skb) ((struct xfrm_tunnel_skb_cb *)&((__skb)->cb[0]))
661 
662 /*
663  * This structure is used for the duration where packets are being
664  * transformed by IPsec.  As soon as the packet leaves IPsec the
665  * area beyond the generic IP part may be overwritten.
666  */
667 struct xfrm_skb_cb {
668 	struct xfrm_tunnel_skb_cb header;
669 
670         /* Sequence number for replay protection. */
671 	union {
672 		struct {
673 			__u32 low;
674 			__u32 hi;
675 		} output;
676 		struct {
677 			__be32 low;
678 			__be32 hi;
679 		} input;
680 	} seq;
681 };
682 
683 #define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0]))
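/*
 * Example (illustrative): on output the replay logic records the sequence
 * number assigned to the packet here, roughly
 *
 *	XFRM_SKB_CB(skb)->seq.output.low = ++x->replay.oseq;
 *
 * and ESP reads it back when building the header; on input seq.input.low/hi
 * carry the received sequence number for the replay check.
 */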
684 
685 /*
686  * This structure is used by the afinfo prepare_input/prepare_output functions
687  * to transmit header information to the mode input/output functions.
688  */
689 struct xfrm_mode_skb_cb {
690 	struct xfrm_tunnel_skb_cb header;
691 
692	/* Copied from the header for IPv4; for IPv6, id is zero and frag_off is DF. */
693 	__be16 id;
694 	__be16 frag_off;
695 
696 	/* IP header length (excluding options or extension headers). */
697 	u8 ihl;
698 
699	/* TOS for IPv4, traffic class for IPv6. */
700 	u8 tos;
701 
702	/* TTL for IPv4, hop limit for IPv6. */
703 	u8 ttl;
704 
705 	/* Protocol for IPv4, NH for IPv6. */
706 	u8 protocol;
707 
708 	/* Option length for IPv4, zero for IPv6. */
709 	u8 optlen;
710 
711 	/* Used by IPv6 only, zero for IPv4. */
712 	u8 flow_lbl[3];
713 };
714 
715 #define XFRM_MODE_SKB_CB(__skb) ((struct xfrm_mode_skb_cb *)&((__skb)->cb[0]))
716 
717 /*
718  * This structure is used by the input processing to locate the SPI and
719  * related information.
720  */
721 struct xfrm_spi_skb_cb {
722 	struct xfrm_tunnel_skb_cb header;
723 
724 	unsigned int daddroff;
725 	unsigned int family;
726 	__be32 seq;
727 };
728 
729 #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
730 
731 #ifdef CONFIG_AUDITSYSCALL
732 static inline struct audit_buffer *xfrm_audit_start(const char *op)
733 {
734 	struct audit_buffer *audit_buf = NULL;
735 
736 	if (audit_enabled == 0)
737 		return NULL;
738 	audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC,
739 				    AUDIT_MAC_IPSEC_EVENT);
740 	if (audit_buf == NULL)
741 		return NULL;
742 	audit_log_format(audit_buf, "op=%s", op);
743 	return audit_buf;
744 }
745 
746 static inline void xfrm_audit_helper_usrinfo(bool task_valid,
747 					     struct audit_buffer *audit_buf)
748 {
749 	const unsigned int auid = from_kuid(&init_user_ns, task_valid ?
750 					    audit_get_loginuid(current) :
751 					    INVALID_UID);
752 	const unsigned int ses = task_valid ? audit_get_sessionid(current) :
753 		(unsigned int) -1;
754 
755 	audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses);
756 	audit_log_task_context(audit_buf);
757 }
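/*
 * Typical composition of an audit record (a sketch of the pattern used by
 * the xfrm_audit_* helpers declared below, e.g. for a policy add):
 *
 *	struct audit_buffer *audit_buf;
 *
 *	audit_buf = xfrm_audit_start("SPD-add");
 *	if (audit_buf == NULL)
 *		return;
 *	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
 *	audit_log_format(audit_buf, " res=%u", result);
 *	audit_log_end(audit_buf);
 */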
758 
759 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid);
760 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
761 			      bool task_valid);
762 void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid);
763 void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid);
764 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
765 				      struct sk_buff *skb);
766 void xfrm_audit_state_replay(struct xfrm_state *x, struct sk_buff *skb,
767 			     __be32 net_seq);
768 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
769 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, __be32 net_spi,
770 			       __be32 net_seq);
771 void xfrm_audit_state_icvfail(struct xfrm_state *x, struct sk_buff *skb,
772 			      u8 proto);
773 #else
774 
775 static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
776 					 bool task_valid)
777 {
778 }
779 
780 static inline void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
781 					    bool task_valid)
782 {
783 }
784 
785 static inline void xfrm_audit_state_add(struct xfrm_state *x, int result,
786 					bool task_valid)
787 {
788 }
789 
790 static inline void xfrm_audit_state_delete(struct xfrm_state *x, int result,
791 					   bool task_valid)
792 {
793 }
794 
795 static inline void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
796 					     struct sk_buff *skb)
797 {
798 }
799 
800 static inline void xfrm_audit_state_replay(struct xfrm_state *x,
801 					   struct sk_buff *skb, __be32 net_seq)
802 {
803 }
804 
805 static inline void xfrm_audit_state_notfound_simple(struct sk_buff *skb,
806 				      u16 family)
807 {
808 }
809 
810 static inline void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
811 				      __be32 net_spi, __be32 net_seq)
812 {
813 }
814 
815 static inline void xfrm_audit_state_icvfail(struct xfrm_state *x,
816 				     struct sk_buff *skb, u8 proto)
817 {
818 }
819 #endif /* CONFIG_AUDITSYSCALL */
820 
821 static inline void xfrm_pol_hold(struct xfrm_policy *policy)
822 {
823 	if (likely(policy != NULL))
824 		refcount_inc(&policy->refcnt);
825 }
826 
827 void xfrm_policy_destroy(struct xfrm_policy *policy);
828 
829 static inline void xfrm_pol_put(struct xfrm_policy *policy)
830 {
831 	if (refcount_dec_and_test(&policy->refcnt))
832 		xfrm_policy_destroy(policy);
833 }
834 
835 static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
836 {
837 	int i;
838 	for (i = npols - 1; i >= 0; --i)
839 		xfrm_pol_put(pols[i]);
840 }
841 
842 void __xfrm_state_destroy(struct xfrm_state *);
843 
844 static inline void __xfrm_state_put(struct xfrm_state *x)
845 {
846 	refcount_dec(&x->refcnt);
847 }
848 
849 static inline void xfrm_state_put(struct xfrm_state *x)
850 {
851 	if (refcount_dec_and_test(&x->refcnt))
852 		__xfrm_state_destroy(x);
853 }
854 
855 static inline void xfrm_state_hold(struct xfrm_state *x)
856 {
857 	refcount_inc(&x->refcnt);
858 }
859 
860 static inline bool addr_match(const void *token1, const void *token2,
861 			      unsigned int prefixlen)
862 {
863 	const __be32 *a1 = token1;
864 	const __be32 *a2 = token2;
865 	unsigned int pdw;
866 	unsigned int pbi;
867 
868 	pdw = prefixlen >> 5;	  /* num of whole u32 in prefix */
869 	pbi = prefixlen &  0x1f;  /* num of bits in incomplete u32 in prefix */
870 
871 	if (pdw)
872 		if (memcmp(a1, a2, pdw << 2))
873 			return false;
874 
875 	if (pbi) {
876 		__be32 mask;
877 
878 		mask = htonl((0xffffffff) << (32 - pbi));
879 
880 		if ((a1[pdw] ^ a2[pdw]) & mask)
881 			return false;
882 	}
883 
884 	return true;
885 }
886 
887 static inline bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen)
888 {
889 	/* C99 6.5.7 (3): u32 << 32 is undefined behaviour */
890 	if (sizeof(long) == 4 && prefixlen == 0)
891 		return true;
892 	return !((a1 ^ a2) & htonl(~0UL << (32 - prefixlen)));
893 }
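/*
 * Worked example: with prefixlen == 24 the mask is htonl(0xffffff00), so
 * addr4_match() treats 192.168.1.1 and 192.168.1.254 as equal while
 * 192.168.2.1 differs.  addr_match() applies the same idea to wider
 * addresses one 32-bit word at a time, and prefixlen == 0 matches
 * everything (the explicit sizeof(long) check above sidesteps the
 * undefined 32-bit shift by 32).
 */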
894 
895 static __inline__
896 __be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli)
897 {
898 	__be16 port;
899 	switch(fl->flowi_proto) {
900 	case IPPROTO_TCP:
901 	case IPPROTO_UDP:
902 	case IPPROTO_UDPLITE:
903 	case IPPROTO_SCTP:
904 		port = uli->ports.sport;
905 		break;
906 	case IPPROTO_ICMP:
907 	case IPPROTO_ICMPV6:
908 		port = htons(uli->icmpt.type);
909 		break;
910 	case IPPROTO_MH:
911 		port = htons(uli->mht.type);
912 		break;
913 	case IPPROTO_GRE:
914 		port = htons(ntohl(uli->gre_key) >> 16);
915 		break;
916 	default:
917 		port = 0;	/*XXX*/
918 	}
919 	return port;
920 }
921 
922 static __inline__
923 __be16 xfrm_flowi_dport(const struct flowi *fl, const union flowi_uli *uli)
924 {
925 	__be16 port;
926 	switch(fl->flowi_proto) {
927 	case IPPROTO_TCP:
928 	case IPPROTO_UDP:
929 	case IPPROTO_UDPLITE:
930 	case IPPROTO_SCTP:
931 		port = uli->ports.dport;
932 		break;
933 	case IPPROTO_ICMP:
934 	case IPPROTO_ICMPV6:
935 		port = htons(uli->icmpt.code);
936 		break;
937 	case IPPROTO_GRE:
938 		port = htons(ntohl(uli->gre_key) & 0xffff);
939 		break;
940 	default:
941 		port = 0;	/*XXX*/
942 	}
943 	return port;
944 }
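/*
 * Note on the selector "ports" for non-port protocols: the ICMP/ICMPv6
 * message type is returned as the source port and the code as the
 * destination port (MH uses its type as the source port and GRE splits its
 * 32-bit key across both), so a selector meant to match ICMP echo requests
 * would carry sport == htons(ICMP_ECHO) rather than a real port number.
 */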
945 
946 bool xfrm_selector_match(const struct xfrm_selector *sel,
947 			 const struct flowi *fl, unsigned short family);
948 
949 #ifdef CONFIG_SECURITY_NETWORK_XFRM
950 /*	If neither has a context --> match
951  * 	Otherwise, both must have a context and the sids, doi, alg must match
952  */
953 static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
954 {
955 	return ((!s1 && !s2) ||
956 		(s1 && s2 &&
957 		 (s1->ctx_sid == s2->ctx_sid) &&
958 		 (s1->ctx_doi == s2->ctx_doi) &&
959 		 (s1->ctx_alg == s2->ctx_alg)));
960 }
961 #else
962 static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
963 {
964 	return true;
965 }
966 #endif
967 
968 /* A struct encoding a bundle of transformations to apply to some set of flows.
969  *
970  * dst->child points to the next element of the bundle.
971  * dst->xfrm  points to an instance of a transformer.
972  *
973  * Due to unfortunate limitations of the current routing cache, which we
974  * have no time to fix, it mirrors struct rtable and is bound to the same
975  * routing key, including saddr and daddr. However, we can have many
976  * bundles differing by session id. All the bundles grow from a parent
977  * policy rule.
978  */
979 struct xfrm_dst {
980 	union {
981 		struct dst_entry	dst;
982 		struct rtable		rt;
983 		struct rt6_info		rt6;
984 	} u;
985 	struct dst_entry *route;
986 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
987 	int num_pols, num_xfrms;
988 	u32 xfrm_genid;
989 	u32 policy_genid;
990 	u32 route_mtu_cached;
991 	u32 child_mtu_cached;
992 	u32 route_cookie;
993 	u32 path_cookie;
994 };
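/*
 * Walking a bundle (sketch, given a struct xfrm_dst *xdst): each element is
 * a dst_entry whose ->xfrm points at the SA applied at that layer and whose
 * ->child points at the next element; the last child is the plain route the
 * packets finally leave through.
 *
 *	const struct dst_entry *dst;
 *
 *	for (dst = &xdst->u.dst; dst; dst = dst->child)
 *		if (dst->xfrm)
 *			pr_debug("SA proto %u\n", dst->xfrm->id.proto);
 */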
995 
996 #ifdef CONFIG_XFRM
997 static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
998 {
999 	xfrm_pols_put(xdst->pols, xdst->num_pols);
1000 	dst_release(xdst->route);
1001 	if (likely(xdst->u.dst.xfrm))
1002 		xfrm_state_put(xdst->u.dst.xfrm);
1003 }
1004 #endif
1005 
1006 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
1007 
1008 struct xfrm_offload {
1009 	/* Output sequence number for replay protection on offloading. */
1010 	struct {
1011 		__u32 low;
1012 		__u32 hi;
1013 	} seq;
1014 
1015 	__u32			flags;
1016 #define	SA_DELETE_REQ		1
1017 #define	CRYPTO_DONE		2
1018 #define	CRYPTO_NEXT_DONE	4
1019 #define	CRYPTO_FALLBACK		8
1020 #define	XFRM_GSO_SEGMENT	16
1021 #define	XFRM_GRO		32
1022 
1023 	__u32			status;
1024 #define CRYPTO_SUCCESS				1
1025 #define CRYPTO_GENERIC_ERROR			2
1026 #define CRYPTO_TRANSPORT_AH_AUTH_FAILED		4
1027 #define CRYPTO_TRANSPORT_ESP_AUTH_FAILED	8
1028 #define CRYPTO_TUNNEL_AH_AUTH_FAILED		16
1029 #define CRYPTO_TUNNEL_ESP_AUTH_FAILED		32
1030 #define CRYPTO_INVALID_PACKET_SYNTAX		64
1031 #define CRYPTO_INVALID_PROTOCOL			128
1032 
1033 	__u8			proto;
1034 };
1035 
1036 struct sec_path {
1037 	refcount_t		refcnt;
1038 	int			len;
1039 	int			olen;
1040 
1041 	struct xfrm_state	*xvec[XFRM_MAX_DEPTH];
1042 	struct xfrm_offload	ovec[XFRM_MAX_OFFLOAD_DEPTH];
1043 };
1044 
1045 static inline int secpath_exists(struct sk_buff *skb)
1046 {
1047 #ifdef CONFIG_XFRM
1048 	return skb->sp != NULL;
1049 #else
1050 	return 0;
1051 #endif
1052 }
1053 
1054 static inline struct sec_path *
1055 secpath_get(struct sec_path *sp)
1056 {
1057 	if (sp)
1058 		refcount_inc(&sp->refcnt);
1059 	return sp;
1060 }
1061 
1062 void __secpath_destroy(struct sec_path *sp);
1063 
1064 static inline void
1065 secpath_put(struct sec_path *sp)
1066 {
1067 	if (sp && refcount_dec_and_test(&sp->refcnt))
1068 		__secpath_destroy(sp);
1069 }
1070 
1071 struct sec_path *secpath_dup(struct sec_path *src);
1072 int secpath_set(struct sk_buff *skb);
1073 
1074 static inline void
1075 secpath_reset(struct sk_buff *skb)
1076 {
1077 #ifdef CONFIG_XFRM
1078 	secpath_put(skb->sp);
1079 	skb->sp = NULL;
1080 #endif
1081 }
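/*
 * Sketch of how the input path uses the secpath: secpath_set() makes sure
 * skb->sp exists and is not shared, after which each successfully applied
 * SA is recorded for the later policy check, roughly as xfrm_input() does:
 *
 *	if (secpath_set(skb))
 *		goto drop;
 *	...
 *	skb->sp->xvec[skb->sp->len++] = x;
 */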
1082 
1083 static inline int
1084 xfrm_addr_any(const xfrm_address_t *addr, unsigned short family)
1085 {
1086 	switch (family) {
1087 	case AF_INET:
1088 		return addr->a4 == 0;
1089 	case AF_INET6:
1090 		return ipv6_addr_any(&addr->in6);
1091 	}
1092 	return 0;
1093 }
1094 
1095 static inline int
1096 __xfrm4_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
1097 {
1098 	return	(tmpl->saddr.a4 &&
1099 		 tmpl->saddr.a4 != x->props.saddr.a4);
1100 }
1101 
1102 static inline int
1103 __xfrm6_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
1104 {
1105 	return	(!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
1106 		 !ipv6_addr_equal((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
1107 }
1108 
1109 static inline int
1110 xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, unsigned short family)
1111 {
1112 	switch (family) {
1113 	case AF_INET:
1114 		return __xfrm4_state_addr_cmp(tmpl, x);
1115 	case AF_INET6:
1116 		return __xfrm6_state_addr_cmp(tmpl, x);
1117 	}
1118 	return !0;
1119 }
1120 
1121 #ifdef CONFIG_XFRM
1122 int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
1123 			unsigned short family);
1124 
1125 static inline int __xfrm_policy_check2(struct sock *sk, int dir,
1126 				       struct sk_buff *skb,
1127 				       unsigned int family, int reverse)
1128 {
1129 	struct net *net = dev_net(skb->dev);
1130 	int ndir = dir | (reverse ? XFRM_POLICY_MASK + 1 : 0);
1131 
1132 	if (sk && sk->sk_policy[XFRM_POLICY_IN])
1133 		return __xfrm_policy_check(sk, ndir, skb, family);
1134 
1135 	return	(!net->xfrm.policy_count[dir] && !skb->sp) ||
1136 		(skb_dst(skb)->flags & DST_NOPOLICY) ||
1137 		__xfrm_policy_check(sk, ndir, skb, family);
1138 }
1139 
1140 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1141 {
1142 	return __xfrm_policy_check2(sk, dir, skb, family, 0);
1143 }
1144 
1145 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1146 {
1147 	return xfrm_policy_check(sk, dir, skb, AF_INET);
1148 }
1149 
1150 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1151 {
1152 	return xfrm_policy_check(sk, dir, skb, AF_INET6);
1153 }
1154 
1155 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1156 					     struct sk_buff *skb)
1157 {
1158 	return __xfrm_policy_check2(sk, dir, skb, AF_INET, 1);
1159 }
1160 
1161 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1162 					     struct sk_buff *skb)
1163 {
1164 	return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
1165 }
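/*
 * Transport protocols call these helpers on receive, once the owning socket
 * is known; e.g. the TCP/IPv4 input path does (sketch)
 *
 *	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
 *		goto discard;
 *
 * which stays cheap when no policies are installed thanks to the
 * policy_count/secpath short-cut in __xfrm_policy_check2() above.
 */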
1166 
1167 int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
1168 			  unsigned int family, int reverse);
1169 
1170 static inline int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
1171 				      unsigned int family)
1172 {
1173 	return __xfrm_decode_session(skb, fl, family, 0);
1174 }
1175 
1176 static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
1177 					      struct flowi *fl,
1178 					      unsigned int family)
1179 {
1180 	return __xfrm_decode_session(skb, fl, family, 1);
1181 }
1182 
1183 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
1184 
1185 static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
1186 {
1187 	struct net *net = dev_net(skb->dev);
1188 
1189 	return	!net->xfrm.policy_count[XFRM_POLICY_OUT] ||
1190 		(skb_dst(skb)->flags & DST_NOXFRM) ||
1191 		__xfrm_route_forward(skb, family);
1192 }
1193 
1194 static inline int xfrm4_route_forward(struct sk_buff *skb)
1195 {
1196 	return xfrm_route_forward(skb, AF_INET);
1197 }
1198 
1199 static inline int xfrm6_route_forward(struct sk_buff *skb)
1200 {
1201 	return xfrm_route_forward(skb, AF_INET6);
1202 }
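/*
 * The forwarding path uses the route variant instead; ip_forward() does
 * (sketch)
 *
 *	if (!xfrm4_route_forward(skb))
 *		goto drop;
 *
 * so forwarded traffic is rerouted through an IPsec bundle when a matching
 * outbound policy exists.
 */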
1203 
1204 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk);
1205 
1206 static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
1207 {
1208 	sk->sk_policy[0] = NULL;
1209 	sk->sk_policy[1] = NULL;
1210 	if (unlikely(osk->sk_policy[0] || osk->sk_policy[1]))
1211 		return __xfrm_sk_clone_policy(sk, osk);
1212 	return 0;
1213 }
1214 
1215 int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
1216 
1217 static inline void xfrm_sk_free_policy(struct sock *sk)
1218 {
1219 	struct xfrm_policy *pol;
1220 
1221 	pol = rcu_dereference_protected(sk->sk_policy[0], 1);
1222 	if (unlikely(pol != NULL)) {
1223 		xfrm_policy_delete(pol, XFRM_POLICY_MAX);
1224 		sk->sk_policy[0] = NULL;
1225 	}
1226 	pol = rcu_dereference_protected(sk->sk_policy[1], 1);
1227 	if (unlikely(pol != NULL)) {
1228 		xfrm_policy_delete(pol, XFRM_POLICY_MAX+1);
1229 		sk->sk_policy[1] = NULL;
1230 	}
1231 }
1232 
1233 #else
1234 
1235 static inline void xfrm_sk_free_policy(struct sock *sk) {}
1236 static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) { return 0; }
1237 static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
1238 static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
1239 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1240 {
1241 	return 1;
1242 }
1243 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1244 {
1245 	return 1;
1246 }
1247 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1248 {
1249 	return 1;
1250 }
1251 static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
1252 					      struct flowi *fl,
1253 					      unsigned int family)
1254 {
1255 	return -ENOSYS;
1256 }
1257 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1258 					     struct sk_buff *skb)
1259 {
1260 	return 1;
1261 }
1262 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1263 					     struct sk_buff *skb)
1264 {
1265 	return 1;
1266 }
1267 #endif
1268 
1269 static __inline__
1270 xfrm_address_t *xfrm_flowi_daddr(const struct flowi *fl, unsigned short family)
1271 {
1272 	switch (family){
1273 	case AF_INET:
1274 		return (xfrm_address_t *)&fl->u.ip4.daddr;
1275 	case AF_INET6:
1276 		return (xfrm_address_t *)&fl->u.ip6.daddr;
1277 	}
1278 	return NULL;
1279 }
1280 
1281 static __inline__
1282 xfrm_address_t *xfrm_flowi_saddr(const struct flowi *fl, unsigned short family)
1283 {
1284 	switch (family){
1285 	case AF_INET:
1286 		return (xfrm_address_t *)&fl->u.ip4.saddr;
1287 	case AF_INET6:
1288 		return (xfrm_address_t *)&fl->u.ip6.saddr;
1289 	}
1290 	return NULL;
1291 }
1292 
1293 static __inline__
1294 void xfrm_flowi_addr_get(const struct flowi *fl,
1295 			 xfrm_address_t *saddr, xfrm_address_t *daddr,
1296 			 unsigned short family)
1297 {
1298 	switch(family) {
1299 	case AF_INET:
1300 		memcpy(&saddr->a4, &fl->u.ip4.saddr, sizeof(saddr->a4));
1301 		memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
1302 		break;
1303 	case AF_INET6:
1304 		saddr->in6 = fl->u.ip6.saddr;
1305 		daddr->in6 = fl->u.ip6.daddr;
1306 		break;
1307 	}
1308 }
1309 
1310 static __inline__ int
1311 __xfrm4_state_addr_check(const struct xfrm_state *x,
1312 			 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
1313 {
1314 	if (daddr->a4 == x->id.daddr.a4 &&
1315 	    (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
1316 		return 1;
1317 	return 0;
1318 }
1319 
1320 static __inline__ int
1321 __xfrm6_state_addr_check(const struct xfrm_state *x,
1322 			 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
1323 {
1324 	if (ipv6_addr_equal((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
1325 	    (ipv6_addr_equal((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr) ||
1326 	     ipv6_addr_any((struct in6_addr *)saddr) ||
1327 	     ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
1328 		return 1;
1329 	return 0;
1330 }
1331 
1332 static __inline__ int
1333 xfrm_state_addr_check(const struct xfrm_state *x,
1334 		      const xfrm_address_t *daddr, const xfrm_address_t *saddr,
1335 		      unsigned short family)
1336 {
1337 	switch (family) {
1338 	case AF_INET:
1339 		return __xfrm4_state_addr_check(x, daddr, saddr);
1340 	case AF_INET6:
1341 		return __xfrm6_state_addr_check(x, daddr, saddr);
1342 	}
1343 	return 0;
1344 }
1345 
1346 static __inline__ int
1347 xfrm_state_addr_flow_check(const struct xfrm_state *x, const struct flowi *fl,
1348 			   unsigned short family)
1349 {
1350 	switch (family) {
1351 	case AF_INET:
1352 		return __xfrm4_state_addr_check(x,
1353 						(const xfrm_address_t *)&fl->u.ip4.daddr,
1354 						(const xfrm_address_t *)&fl->u.ip4.saddr);
1355 	case AF_INET6:
1356 		return __xfrm6_state_addr_check(x,
1357 						(const xfrm_address_t *)&fl->u.ip6.daddr,
1358 						(const xfrm_address_t *)&fl->u.ip6.saddr);
1359 	}
1360 	return 0;
1361 }
1362 
1363 static inline int xfrm_state_kern(const struct xfrm_state *x)
1364 {
1365 	return atomic_read(&x->tunnel_users);
1366 }
1367 
1368 static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
1369 {
1370 	return (!userproto || proto == userproto ||
1371 		(userproto == IPSEC_PROTO_ANY && (proto == IPPROTO_AH ||
1372 						  proto == IPPROTO_ESP ||
1373 						  proto == IPPROTO_COMP)));
1374 }
1375 
1376 /*
1377  * xfrm algorithm information
1378  */
1379 struct xfrm_algo_aead_info {
1380 	char *geniv;
1381 	u16 icv_truncbits;
1382 };
1383 
1384 struct xfrm_algo_auth_info {
1385 	u16 icv_truncbits;
1386 	u16 icv_fullbits;
1387 };
1388 
1389 struct xfrm_algo_encr_info {
1390 	char *geniv;
1391 	u16 blockbits;
1392 	u16 defkeybits;
1393 };
1394 
1395 struct xfrm_algo_comp_info {
1396 	u16 threshold;
1397 };
1398 
1399 struct xfrm_algo_desc {
1400 	char *name;
1401 	char *compat;
1402 	u8 available:1;
1403 	u8 pfkey_supported:1;
1404 	union {
1405 		struct xfrm_algo_aead_info aead;
1406 		struct xfrm_algo_auth_info auth;
1407 		struct xfrm_algo_encr_info encr;
1408 		struct xfrm_algo_comp_info comp;
1409 	} uinfo;
1410 	struct sadb_alg desc;
1411 };
1412 
1413 /* XFRM protocol handlers.  */
1414 struct xfrm4_protocol {
1415 	int (*handler)(struct sk_buff *skb);
1416 	int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
1417 			     int encap_type);
1418 	int (*cb_handler)(struct sk_buff *skb, int err);
1419 	int (*err_handler)(struct sk_buff *skb, u32 info);
1420 
1421 	struct xfrm4_protocol __rcu *next;
1422 	int priority;
1423 };
1424 
1425 struct xfrm6_protocol {
1426 	int (*handler)(struct sk_buff *skb);
1427 	int (*cb_handler)(struct sk_buff *skb, int err);
1428 	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
1429 			   u8 type, u8 code, int offset, __be32 info);
1430 
1431 	struct xfrm6_protocol __rcu *next;
1432 	int priority;
1433 };
1434 
1435 /* XFRM tunnel handlers.  */
1436 struct xfrm_tunnel {
1437 	int (*handler)(struct sk_buff *skb);
1438 	int (*err_handler)(struct sk_buff *skb, u32 info);
1439 
1440 	struct xfrm_tunnel __rcu *next;
1441 	int priority;
1442 };
1443 
1444 struct xfrm6_tunnel {
1445 	int (*handler)(struct sk_buff *skb);
1446 	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
1447 			   u8 type, u8 code, int offset, __be32 info);
1448 	struct xfrm6_tunnel __rcu *next;
1449 	int priority;
1450 };
1451 
1452 void xfrm_init(void);
1453 void xfrm4_init(void);
1454 int xfrm_state_init(struct net *net);
1455 void xfrm_state_fini(struct net *net);
1456 void xfrm4_state_init(void);
1457 void xfrm4_protocol_init(void);
1458 #ifdef CONFIG_XFRM
1459 int xfrm6_init(void);
1460 void xfrm6_fini(void);
1461 int xfrm6_state_init(void);
1462 void xfrm6_state_fini(void);
1463 int xfrm6_protocol_init(void);
1464 void xfrm6_protocol_fini(void);
1465 #else
1466 static inline int xfrm6_init(void)
1467 {
1468 	return 0;
1469 }
1470 static inline void xfrm6_fini(void)
1471 {
1472 	;
1473 }
1474 #endif
1475 
1476 #ifdef CONFIG_XFRM_STATISTICS
1477 int xfrm_proc_init(struct net *net);
1478 void xfrm_proc_fini(struct net *net);
1479 #endif
1480 
1481 int xfrm_sysctl_init(struct net *net);
1482 #ifdef CONFIG_SYSCTL
1483 void xfrm_sysctl_fini(struct net *net);
1484 #else
1485 static inline void xfrm_sysctl_fini(struct net *net)
1486 {
1487 }
1488 #endif
1489 
1490 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
1491 			  struct xfrm_address_filter *filter);
1492 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
1493 		    int (*func)(struct xfrm_state *, int, void*), void *);
1494 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
1495 struct xfrm_state *xfrm_state_alloc(struct net *net);
1496 struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
1497 				   const xfrm_address_t *saddr,
1498 				   const struct flowi *fl,
1499 				   struct xfrm_tmpl *tmpl,
1500 				   struct xfrm_policy *pol, int *err,
1501 				   unsigned short family);
1502 struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark,
1503 				       xfrm_address_t *daddr,
1504 				       xfrm_address_t *saddr,
1505 				       unsigned short family,
1506 				       u8 mode, u8 proto, u32 reqid);
1507 struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
1508 					      unsigned short family);
1509 int xfrm_state_check_expire(struct xfrm_state *x);
1510 void xfrm_state_insert(struct xfrm_state *x);
1511 int xfrm_state_add(struct xfrm_state *x);
1512 int xfrm_state_update(struct xfrm_state *x);
1513 struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
1514 				     const xfrm_address_t *daddr, __be32 spi,
1515 				     u8 proto, unsigned short family);
1516 struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
1517 					    const xfrm_address_t *daddr,
1518 					    const xfrm_address_t *saddr,
1519 					    u8 proto,
1520 					    unsigned short family);
1521 #ifdef CONFIG_XFRM_SUB_POLICY
1522 int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1523 		   unsigned short family, struct net *net);
1524 int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1525 		    unsigned short family);
1526 #else
1527 static inline int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
1528 				 int n, unsigned short family, struct net *net)
1529 {
1530 	return -ENOSYS;
1531 }
1532 
1533 static inline int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
1534 				  int n, unsigned short family)
1535 {
1536 	return -ENOSYS;
1537 }
1538 #endif
1539 
1540 struct xfrmk_sadinfo {
1541 	u32 sadhcnt; /* current hash bkts */
1542 	u32 sadhmcnt; /* max allowed hash bkts */
1543 	u32 sadcnt; /* current running count */
1544 };
1545 
1546 struct xfrmk_spdinfo {
1547 	u32 incnt;
1548 	u32 outcnt;
1549 	u32 fwdcnt;
1550 	u32 inscnt;
1551 	u32 outscnt;
1552 	u32 fwdscnt;
1553 	u32 spdhcnt;
1554 	u32 spdhmcnt;
1555 };
1556 
1557 struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
1558 int xfrm_state_delete(struct xfrm_state *x);
1559 int xfrm_state_flush(struct net *net, u8 proto, bool task_valid);
1560 int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
1561 void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
1562 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
1563 u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
1564 int xfrm_init_replay(struct xfrm_state *x);
1565 int xfrm_state_mtu(struct xfrm_state *x, int mtu);
1566 int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload);
1567 int xfrm_init_state(struct xfrm_state *x);
1568 int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
1569 int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
1570 int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
1571 int xfrm_output_resume(struct sk_buff *skb, int err);
1572 int xfrm_output(struct sock *sk, struct sk_buff *skb);
1573 int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1574 void xfrm_local_error(struct sk_buff *skb, int mtu);
1575 int xfrm4_extract_header(struct sk_buff *skb);
1576 int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1577 int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
1578 		    int encap_type);
1579 int xfrm4_transport_finish(struct sk_buff *skb, int async);
1580 int xfrm4_rcv(struct sk_buff *skb);
1581 int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
1582 
1583 static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
1584 {
1585 	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
1586 	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
1587 	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
1588 	return xfrm_input(skb, nexthdr, spi, 0);
1589 }
1590 
1591 int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1592 int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
1593 int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb);
1594 int xfrm4_output_finish(struct sock *sk, struct sk_buff *skb);
1595 int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err);
1596 int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
1597 int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol);
1598 int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
1599 int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
1600 void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
1601 int xfrm6_extract_header(struct sk_buff *skb);
1602 int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1603 int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
1604 		  struct ip6_tnl *t);
1605 int xfrm6_transport_finish(struct sk_buff *skb, int async);
1606 int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t);
1607 int xfrm6_rcv(struct sk_buff *skb);
1608 int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
1609 		     xfrm_address_t *saddr, u8 proto);
1610 void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
1611 int xfrm6_rcv_cb(struct sk_buff *skb, u8 protocol, int err);
1612 int xfrm6_protocol_register(struct xfrm6_protocol *handler, unsigned char protocol);
1613 int xfrm6_protocol_deregister(struct xfrm6_protocol *handler, unsigned char protocol);
1614 int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
1615 int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
1616 __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
1617 __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
1618 int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1619 int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
1620 int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
1621 int xfrm6_output_finish(struct sock *sk, struct sk_buff *skb);
1622 int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
1623 			  u8 **prevhdr);
1624 
1625 #ifdef CONFIG_XFRM
1626 int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
1627 int xfrm_user_policy(struct sock *sk, int optname,
1628 		     u8 __user *optval, int optlen);
1629 #else
1630 static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
1631 {
1632  	return -ENOPROTOOPT;
1633 }
1634 
1635 static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
1636 {
1637  	/* should not happen */
1638  	kfree_skb(skb);
1639 	return 0;
1640 }
1641 #endif
1642 
1643 struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
1644 				    const xfrm_address_t *saddr,
1645 				    const xfrm_address_t *daddr,
1646 				    int family, u32 mark);
1647 
1648 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);
1649 
1650 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
1651 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1652 		     int (*func)(struct xfrm_policy *, int, int, void*),
1653 		     void *);
1654 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
1655 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
1656 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark,
1657 					  u8 type, int dir,
1658 					  struct xfrm_selector *sel,
1659 					  struct xfrm_sec_ctx *ctx, int delete,
1660 					  int *err);
1661 struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir,
1662 				     u32 id, int delete, int *err);
1663 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
1664 void xfrm_policy_hash_rebuild(struct net *net);
1665 u32 xfrm_get_acqseq(void);
1666 int verify_spi_info(u8 proto, u32 min, u32 max);
1667 int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
1668 struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
1669 				 u8 mode, u32 reqid, u8 proto,
1670 				 const xfrm_address_t *daddr,
1671 				 const xfrm_address_t *saddr, int create,
1672 				 unsigned short family);
1673 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
1674 
1675 #ifdef CONFIG_XFRM_MIGRATE
1676 int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1677 	       const struct xfrm_migrate *m, int num_bundles,
1678 	       const struct xfrm_kmaddress *k,
1679 	       const struct xfrm_encap_tmpl *encap);
1680 struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net);
1681 struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
1682 				      struct xfrm_migrate *m,
1683 				      struct xfrm_encap_tmpl *encap);
1684 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1685 		 struct xfrm_migrate *m, int num_bundles,
1686 		 struct xfrm_kmaddress *k, struct net *net,
1687 		 struct xfrm_encap_tmpl *encap);
1688 #endif
1689 
1690 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
1691 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid);
1692 int km_report(struct net *net, u8 proto, struct xfrm_selector *sel,
1693 	      xfrm_address_t *addr);
1694 
1695 void xfrm_input_init(void);
1696 int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
1697 
1698 void xfrm_probe_algs(void);
1699 int xfrm_count_pfkey_auth_supported(void);
1700 int xfrm_count_pfkey_enc_supported(void);
1701 struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
1702 struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
1703 struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
1704 struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
1705 struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
1706 struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
1707 struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
1708 struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
1709 struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
1710 					    int probe);
1711 
1712 static inline bool xfrm6_addr_equal(const xfrm_address_t *a,
1713 				    const xfrm_address_t *b)
1714 {
1715 	return ipv6_addr_equal((const struct in6_addr *)a,
1716 			       (const struct in6_addr *)b);
1717 }
1718 
1719 static inline bool xfrm_addr_equal(const xfrm_address_t *a,
1720 				   const xfrm_address_t *b,
1721 				   sa_family_t family)
1722 {
1723 	switch (family) {
1724 	default:
1725 	case AF_INET:
1726 		return ((__force u32)a->a4 ^ (__force u32)b->a4) == 0;
1727 	case AF_INET6:
1728 		return xfrm6_addr_equal(a, b);
1729 	}
1730 }
1731 
1732 static inline int xfrm_policy_id2dir(u32 index)
1733 {
1734 	return index & 7;
1735 }
1736 
1737 #ifdef CONFIG_XFRM
1738 static inline int xfrm_aevent_is_on(struct net *net)
1739 {
1740 	struct sock *nlsk;
1741 	int ret = 0;
1742 
1743 	rcu_read_lock();
1744 	nlsk = rcu_dereference(net->xfrm.nlsk);
1745 	if (nlsk)
1746 		ret = netlink_has_listeners(nlsk, XFRMNLGRP_AEVENTS);
1747 	rcu_read_unlock();
1748 	return ret;
1749 }
1750 
1751 static inline int xfrm_acquire_is_on(struct net *net)
1752 {
1753 	struct sock *nlsk;
1754 	int ret = 0;
1755 
1756 	rcu_read_lock();
1757 	nlsk = rcu_dereference(net->xfrm.nlsk);
1758 	if (nlsk)
1759 		ret = netlink_has_listeners(nlsk, XFRMNLGRP_ACQUIRE);
1760 	rcu_read_unlock();
1761 
1762 	return ret;
1763 }
1764 #endif
1765 
1766 static inline int aead_len(struct xfrm_algo_aead *alg)
1767 {
1768 	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1769 }
1770 
1771 static inline int xfrm_alg_len(const struct xfrm_algo *alg)
1772 {
1773 	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1774 }
1775 
1776 static inline int xfrm_alg_auth_len(const struct xfrm_algo_auth *alg)
1777 {
1778 	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1779 }
1780 
1781 static inline int xfrm_replay_state_esn_len(struct xfrm_replay_state_esn *replay_esn)
1782 {
1783 	return sizeof(*replay_esn) + replay_esn->bmp_len * sizeof(__u32);
1784 }
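/*
 * These length helpers size the variable-length algorithm and replay
 * payloads: alg_key_len is in bits, rounded up to whole bytes.  E.g. an
 * HMAC-SHA1 xfrm_algo_auth with alg_key_len == 160 occupies
 * sizeof(struct xfrm_algo_auth) + 20 bytes, and a replay_esn with
 * bmp_len == 4 adds 16 bytes of bitmap, i.e. a 128-bit replay window.
 */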
1785 
1786 #ifdef CONFIG_XFRM_MIGRATE
1787 static inline int xfrm_replay_clone(struct xfrm_state *x,
1788 				     struct xfrm_state *orig)
1789 {
1790 	x->replay_esn = kzalloc(xfrm_replay_state_esn_len(orig->replay_esn),
1791 				GFP_KERNEL);
1792 	if (!x->replay_esn)
1793 		return -ENOMEM;
1794 
1795 	x->replay_esn->bmp_len = orig->replay_esn->bmp_len;
1796 	x->replay_esn->replay_window = orig->replay_esn->replay_window;
1797 
1798 	x->preplay_esn = kmemdup(x->replay_esn,
1799 				 xfrm_replay_state_esn_len(x->replay_esn),
1800 				 GFP_KERNEL);
1801 	if (!x->preplay_esn) {
1802 		kfree(x->replay_esn);
1803 		return -ENOMEM;
1804 	}
1805 
1806 	return 0;
1807 }
1808 
1809 static inline struct xfrm_algo_aead *xfrm_algo_aead_clone(struct xfrm_algo_aead *orig)
1810 {
1811 	return kmemdup(orig, aead_len(orig), GFP_KERNEL);
1812 }
1813 
1814 
1815 static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
1816 {
1817 	return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
1818 }
1819 
1820 static inline struct xfrm_algo_auth *xfrm_algo_auth_clone(struct xfrm_algo_auth *orig)
1821 {
1822 	return kmemdup(orig, xfrm_alg_auth_len(orig), GFP_KERNEL);
1823 }
1824 
1825 static inline void xfrm_states_put(struct xfrm_state **states, int n)
1826 {
1827 	int i;
1828 	for (i = 0; i < n; i++)
1829 		xfrm_state_put(*(states + i));
1830 }
1831 
1832 static inline void xfrm_states_delete(struct xfrm_state **states, int n)
1833 {
1834 	int i;
1835 	for (i = 0; i < n; i++)
1836 		xfrm_state_delete(*(states + i));
1837 }
1838 #endif
1839 
1840 #ifdef CONFIG_XFRM
1841 static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
1842 {
1843 	return skb->sp->xvec[skb->sp->len - 1];
1844 }
1845 static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
1846 {
1847 	struct sec_path *sp = skb->sp;
1848 
1849 	if (!sp || !sp->olen || sp->len != sp->olen)
1850 		return NULL;
1851 
1852 	return &sp->ovec[sp->olen - 1];
1853 }
1854 #endif
1855 
1856 void __net_init xfrm_dev_init(void);
1857 
1858 #ifdef CONFIG_XFRM_OFFLOAD
1859 int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features);
1860 int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
1861 		       struct xfrm_user_offload *xuo);
1862 bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
1863 
1864 static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
1865 {
1866 	struct xfrm_state *x = dst->xfrm;
1867 
1868 	if (!x || !x->type_offload)
1869 		return false;
1870 
1871 	if (x->xso.offload_handle && (x->xso.dev == dst->path->dev) &&
1872 	    !dst->child->xfrm)
1873 		return true;
1874 
1875 	return false;
1876 }
1877 
1878 static inline void xfrm_dev_state_delete(struct xfrm_state *x)
1879 {
1880 	struct xfrm_state_offload *xso = &x->xso;
1881 
1882 	if (xso->dev)
1883 		xso->dev->xfrmdev_ops->xdo_dev_state_delete(x);
1884 }
1885 
1886 static inline void xfrm_dev_state_free(struct xfrm_state *x)
1887 {
1888 	struct xfrm_state_offload *xso = &x->xso;
1889	struct net_device *dev = xso->dev;
1890 
1891 	if (dev && dev->xfrmdev_ops) {
1892 		dev->xfrmdev_ops->xdo_dev_state_free(x);
1893 		xso->dev = NULL;
1894 		dev_put(dev);
1895 	}
1896 }
1897 #else
1898 static inline int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
1899 {
1900 	return 0;
1901 }
1902 
1903 static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo)
1904 {
1905 	return 0;
1906 }
1907 
1908 static inline void xfrm_dev_state_delete(struct xfrm_state *x)
1909 {
1910 }
1911 
1912 static inline void xfrm_dev_state_free(struct xfrm_state *x)
1913 {
1914 }
1915 
1916 static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
1917 {
1918 	return false;
1919 }
1920 
1921 static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
1922 {
1923 	return false;
1924 }
1925 #endif
1926 
1927 static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
1928 {
1929 	if (attrs[XFRMA_MARK])
1930 		memcpy(m, nla_data(attrs[XFRMA_MARK]), sizeof(struct xfrm_mark));
1931 	else
1932 		m->v = m->m = 0;
1933 
1934 	return m->v & m->m;
1935 }
1936 
1937 static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
1938 {
1939 	int ret = 0;
1940 
1941 	if (m->m | m->v)
1942 		ret = nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
1943 	return ret;
1944 }
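/*
 * The mark is a (value, mask) pair taken from the optional XFRMA_MARK
 * attribute; xfrm_mark_get() above returns the masked value and lookups
 * elsewhere compare a candidate mark ANDed with m->m against m->v, so e.g.
 * m->m == 0xff, m->v == 0x7 selects marks carrying 0x7 in the low byte.
 * xfrm_mark_put() only emits the attribute when the pair is non-zero.
 */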
1945 
1946 static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
1947 				    unsigned int family)
1948 {
1949 	bool tunnel = false;
1950 
1951 	switch(family) {
1952 	case AF_INET:
1953 		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
1954 			tunnel = true;
1955 		break;
1956 	case AF_INET6:
1957 		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
1958 			tunnel = true;
1959 		break;
1960 	}
1961 	if (tunnel && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL))
1962 		return -EINVAL;
1963 
1964 	return 0;
1965 }
1966 #endif	/* _NET_XFRM_H */
1967