xref: /linux/include/net/xfrm.h (revision ed3174d93c342b8b2eeba6bbd124707d55304a7b)
1 #ifndef _NET_XFRM_H
2 #define _NET_XFRM_H
3 
4 #include <linux/compiler.h>
5 #include <linux/xfrm.h>
6 #include <linux/spinlock.h>
7 #include <linux/list.h>
8 #include <linux/skbuff.h>
9 #include <linux/socket.h>
10 #include <linux/pfkeyv2.h>
11 #include <linux/ipsec.h>
12 #include <linux/in6.h>
13 #include <linux/mutex.h>
14 #include <linux/audit.h>
15 
16 #include <net/sock.h>
17 #include <net/dst.h>
18 #include <net/ip.h>
19 #include <net/route.h>
20 #include <net/ipv6.h>
21 #include <net/ip6_fib.h>
22 #ifdef CONFIG_XFRM_STATISTICS
23 #include <net/snmp.h>
24 #endif
25 
26 #define XFRM_PROTO_ESP		50
27 #define XFRM_PROTO_AH		51
28 #define XFRM_PROTO_COMP		108
29 #define XFRM_PROTO_IPIP		4
30 #define XFRM_PROTO_IPV6		41
31 #define XFRM_PROTO_ROUTING	IPPROTO_ROUTING
32 #define XFRM_PROTO_DSTOPTS	IPPROTO_DSTOPTS
33 
34 #define XFRM_ALIGN8(len)	(((len) + 7) & ~7)
35 #define MODULE_ALIAS_XFRM_MODE(family, encap) \
36 	MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
37 #define MODULE_ALIAS_XFRM_TYPE(family, proto) \
38 	MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto))
39 
40 #ifdef CONFIG_XFRM_STATISTICS
41 DECLARE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics);
42 #define XFRM_INC_STATS(field)		SNMP_INC_STATS(xfrm_statistics, field)
43 #define XFRM_INC_STATS_BH(field)	SNMP_INC_STATS_BH(xfrm_statistics, field)
44 #define XFRM_INC_STATS_USER(field) 	SNMP_INC_STATS_USER(xfrm_statistics, field)
45 #else
46 #define XFRM_INC_STATS(field)
47 #define XFRM_INC_STATS_BH(field)
48 #define XFRM_INC_STATS_USER(field)
49 #endif
50 
51 extern struct sock *xfrm_nl;
52 extern u32 sysctl_xfrm_aevent_etime;
53 extern u32 sysctl_xfrm_aevent_rseqth;
54 extern int sysctl_xfrm_larval_drop;
55 extern u32 sysctl_xfrm_acq_expires;
56 
57 extern struct mutex xfrm_cfg_mutex;
58 
59 /* Organization of SPD aka "XFRM rules"
60    ------------------------------------
61 
62    Basic objects:
63    - policy rule, struct xfrm_policy (=SPD entry)
64    - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
65    - instance of a transformer, struct xfrm_state (=SA)
66    - template to clone xfrm_state, struct xfrm_tmpl
67 
68    SPD is plain linear list of xfrm_policy rules, ordered by priority.
69    (To be compatible with existing pfkeyv2 implementations,
70    many rules with priority of 0x7fffffff are allowed to exist and
71    such rules are ordered in an unpredictable way, thanks to bsd folks.)
72 
73    Lookup is plain linear search until the first match with selector.
74 
75    If "action" is "block", then we prohibit the flow, otherwise:
76    if "xfrms_nr" is zero, the flow passes untransformed. Otherwise,
77    policy entry has list of up to XFRM_MAX_DEPTH transformations,
78    described by templates xfrm_tmpl. Each template is resolved
79    to a complete xfrm_state (see below) and we pack bundle of transformations
80    to a dst_entry returned to requestor.
81 
82    dst -. xfrm  .-> xfrm_state #1
83     |---. child .-> dst -. xfrm .-> xfrm_state #2
84                      |---. child .-> dst -. xfrm .-> xfrm_state #3
85                                       |---. child .-> NULL
86 
87    Bundles are cached at xrfm_policy struct (field ->bundles).
88 
89 
90    Resolution of xrfm_tmpl
91    -----------------------
92    Template contains:
93    1. ->mode		Mode: transport or tunnel
94    2. ->id.proto	Protocol: AH/ESP/IPCOMP
95    3. ->id.daddr	Remote tunnel endpoint, ignored for transport mode.
96       Q: allow to resolve security gateway?
97    4. ->id.spi          If not zero, static SPI.
98    5. ->saddr		Local tunnel endpoint, ignored for transport mode.
99    6. ->algos		List of allowed algos. Plain bitmask now.
100       Q: ealgos, aalgos, calgos. What a mess...
101    7. ->share		Sharing mode.
102       Q: how to implement private sharing mode? To add struct sock* to
103       flow id?
104 
105    Having this template we search through SAD searching for entries
106    with appropriate mode/proto/algo, permitted by selector.
107    If no appropriate entry found, it is requested from key manager.
108 
109    PROBLEMS:
110    Q: How to find all the bundles referring to a physical path for
111       PMTU discovery? Seems, dst should contain list of all parents...
112       and enter to infinite locking hierarchy disaster.
113       No! It is easier, we will not search for them, let them find us.
114       We add genid to each dst plus pointer to genid of raw IP route,
115       pmtu disc will update pmtu on raw IP route and increase its genid.
116       dst_check() will see this for top level and trigger resyncing
117       metrics. Plus, it will be made via sk->sk_dst_cache. Solved.
118  */
119 
120 /* Full description of state of transformer. */
121 struct xfrm_state
122 {
123 	/* Note: bydst is re-used during gc */
124 	struct hlist_node	bydst;
125 	struct hlist_node	bysrc;
126 	struct hlist_node	byspi;
127 
128 	atomic_t		refcnt;
129 	spinlock_t		lock;
130 
131 	struct xfrm_id		id;
132 	struct xfrm_selector	sel;
133 
134 	u32			genid;
135 
136 	/* Key manger bits */
137 	struct {
138 		u8		state;
139 		u8		dying;
140 		u32		seq;
141 	} km;
142 
143 	/* Parameters of this state. */
144 	struct {
145 		u32		reqid;
146 		u8		mode;
147 		u8		replay_window;
148 		u8		aalgo, ealgo, calgo;
149 		u8		flags;
150 		u16		family;
151 		xfrm_address_t	saddr;
152 		int		header_len;
153 		int		trailer_len;
154 	} props;
155 
156 	struct xfrm_lifetime_cfg lft;
157 
158 	/* Data for transformer */
159 	struct xfrm_algo	*aalg;
160 	struct xfrm_algo	*ealg;
161 	struct xfrm_algo	*calg;
162 	struct xfrm_algo_aead	*aead;
163 
164 	/* Data for encapsulator */
165 	struct xfrm_encap_tmpl	*encap;
166 
167 	/* Data for care-of address */
168 	xfrm_address_t	*coaddr;
169 
170 	/* IPComp needs an IPIP tunnel for handling uncompressed packets */
171 	struct xfrm_state	*tunnel;
172 
173 	/* If a tunnel, number of users + 1 */
174 	atomic_t		tunnel_users;
175 
176 	/* State for replay detection */
177 	struct xfrm_replay_state replay;
178 
179 	/* Replay detection state at the time we sent the last notification */
180 	struct xfrm_replay_state preplay;
181 
182 	/* internal flag that only holds state for delayed aevent at the
183 	 * moment
184 	*/
185 	u32			xflags;
186 
187 	/* Replay detection notification settings */
188 	u32			replay_maxage;
189 	u32			replay_maxdiff;
190 
191 	/* Replay detection notification timer */
192 	struct timer_list	rtimer;
193 
194 	/* Statistics */
195 	struct xfrm_stats	stats;
196 
197 	struct xfrm_lifetime_cur curlft;
198 	struct timer_list	timer;
199 
200 	/* Last used time */
201 	unsigned long		lastused;
202 
203 	/* Reference to data common to all the instances of this
204 	 * transformer. */
205 	const struct xfrm_type	*type;
206 	struct xfrm_mode	*inner_mode;
207 	struct xfrm_mode	*outer_mode;
208 
209 	/* Security context */
210 	struct xfrm_sec_ctx	*security;
211 
212 	/* Private data of this transformer, format is opaque,
213 	 * interpreted by xfrm_type methods. */
214 	void			*data;
215 };
216 
217 /* xflags - make enum if more show up */
218 #define XFRM_TIME_DEFER	1
219 
220 enum {
221 	XFRM_STATE_VOID,
222 	XFRM_STATE_ACQ,
223 	XFRM_STATE_VALID,
224 	XFRM_STATE_ERROR,
225 	XFRM_STATE_EXPIRED,
226 	XFRM_STATE_DEAD
227 };
228 
229 /* callback structure passed from either netlink or pfkey */
230 struct km_event
231 {
232 	union {
233 		u32 hard;
234 		u32 proto;
235 		u32 byid;
236 		u32 aevent;
237 		u32 type;
238 	} data;
239 
240 	u32	seq;
241 	u32	pid;
242 	u32	event;
243 };
244 
245 struct net_device;
246 struct xfrm_type;
247 struct xfrm_dst;
248 struct xfrm_policy_afinfo {
249 	unsigned short		family;
250 	struct dst_ops		*dst_ops;
251 	void			(*garbage_collect)(void);
252 	struct dst_entry	*(*dst_lookup)(int tos, xfrm_address_t *saddr,
253 					       xfrm_address_t *daddr);
254 	int			(*get_saddr)(xfrm_address_t *saddr, xfrm_address_t *daddr);
255 	struct dst_entry	*(*find_bundle)(struct flowi *fl, struct xfrm_policy *policy);
256 	void			(*decode_session)(struct sk_buff *skb,
257 						  struct flowi *fl,
258 						  int reverse);
259 	int			(*get_tos)(struct flowi *fl);
260 	int			(*init_path)(struct xfrm_dst *path,
261 					     struct dst_entry *dst,
262 					     int nfheader_len);
263 	int			(*fill_dst)(struct xfrm_dst *xdst,
264 					    struct net_device *dev);
265 };
266 
267 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
268 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
269 extern void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c);
270 extern void km_state_notify(struct xfrm_state *x, struct km_event *c);
271 
272 struct xfrm_tmpl;
273 extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
274 extern void km_state_expired(struct xfrm_state *x, int hard, u32 pid);
275 extern int __xfrm_state_delete(struct xfrm_state *x);
276 
277 struct xfrm_state_afinfo {
278 	unsigned int		family;
279 	unsigned int		proto;
280 	unsigned int		eth_proto;
281 	struct module		*owner;
282 	const struct xfrm_type	*type_map[IPPROTO_MAX];
283 	struct xfrm_mode	*mode_map[XFRM_MODE_MAX];
284 	int			(*init_flags)(struct xfrm_state *x);
285 	void			(*init_tempsel)(struct xfrm_state *x, struct flowi *fl,
286 						struct xfrm_tmpl *tmpl,
287 						xfrm_address_t *daddr, xfrm_address_t *saddr);
288 	int			(*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n);
289 	int			(*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n);
290 	int			(*output)(struct sk_buff *skb);
291 	int			(*extract_input)(struct xfrm_state *x,
292 						 struct sk_buff *skb);
293 	int			(*extract_output)(struct xfrm_state *x,
294 						  struct sk_buff *skb);
295 	int			(*transport_finish)(struct sk_buff *skb,
296 						    int async);
297 };
298 
299 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
300 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
301 
302 extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
303 
304 struct xfrm_type
305 {
306 	char			*description;
307 	struct module		*owner;
308 	__u8			proto;
309 	__u8			flags;
310 #define XFRM_TYPE_NON_FRAGMENT	1
311 #define XFRM_TYPE_REPLAY_PROT	2
312 #define XFRM_TYPE_LOCAL_COADDR	4
313 #define XFRM_TYPE_REMOTE_COADDR	8
314 
315 	int			(*init_state)(struct xfrm_state *x);
316 	void			(*destructor)(struct xfrm_state *);
317 	int			(*input)(struct xfrm_state *, struct sk_buff *skb);
318 	int			(*output)(struct xfrm_state *, struct sk_buff *pskb);
319 	int			(*reject)(struct xfrm_state *, struct sk_buff *, struct flowi *);
320 	int			(*hdr_offset)(struct xfrm_state *, struct sk_buff *, u8 **);
321 	/* Estimate maximal size of result of transformation of a dgram */
322 	u32			(*get_mtu)(struct xfrm_state *, int size);
323 };
324 
325 extern int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
326 extern int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
327 
328 struct xfrm_mode {
329 	/*
330 	 * Remove encapsulation header.
331 	 *
332 	 * The IP header will be moved over the top of the encapsulation
333 	 * header.
334 	 *
335 	 * On entry, the transport header shall point to where the IP header
336 	 * should be and the network header shall be set to where the IP
337 	 * header currently is.  skb->data shall point to the start of the
338 	 * payload.
339 	 */
340 	int (*input2)(struct xfrm_state *x, struct sk_buff *skb);
341 
342 	/*
343 	 * This is the actual input entry point.
344 	 *
345 	 * For transport mode and equivalent this would be identical to
346 	 * input2 (which does not need to be set).  While tunnel mode
347 	 * and equivalent would set this to the tunnel encapsulation function
348 	 * xfrm4_prepare_input that would in turn call input2.
349 	 */
350 	int (*input)(struct xfrm_state *x, struct sk_buff *skb);
351 
352 	/*
353 	 * Add encapsulation header.
354 	 *
355 	 * On exit, the transport header will be set to the start of the
356 	 * encapsulation header to be filled in by x->type->output and
357 	 * the mac header will be set to the nextheader (protocol for
358 	 * IPv4) field of the extension header directly preceding the
359 	 * encapsulation header, or in its absence, that of the top IP
360 	 * header.  The value of the network header will always point
361 	 * to the top IP header while skb->data will point to the payload.
362 	 */
363 	int (*output2)(struct xfrm_state *x,struct sk_buff *skb);
364 
365 	/*
366 	 * This is the actual output entry point.
367 	 *
368 	 * For transport mode and equivalent this would be identical to
369 	 * output2 (which does not need to be set).  While tunnel mode
370 	 * and equivalent would set this to a tunnel encapsulation function
371 	 * (xfrm4_prepare_output or xfrm6_prepare_output) that would in turn
372 	 * call output2.
373 	 */
374 	int (*output)(struct xfrm_state *x, struct sk_buff *skb);
375 
376 	struct xfrm_state_afinfo *afinfo;
377 	struct module *owner;
378 	unsigned int encap;
379 	int flags;
380 };
381 
382 /* Flags for xfrm_mode. */
383 enum {
384 	XFRM_MODE_FLAG_TUNNEL = 1,
385 };
386 
387 extern int xfrm_register_mode(struct xfrm_mode *mode, int family);
388 extern int xfrm_unregister_mode(struct xfrm_mode *mode, int family);
389 
390 struct xfrm_tmpl
391 {
392 /* id in template is interpreted as:
393  * daddr - destination of tunnel, may be zero for transport mode.
394  * spi   - zero to acquire spi. Not zero if spi is static, then
395  *	   daddr must be fixed too.
396  * proto - AH/ESP/IPCOMP
397  */
398 	struct xfrm_id		id;
399 
400 /* Source address of tunnel. Ignored, if it is not a tunnel. */
401 	xfrm_address_t		saddr;
402 
403 	unsigned short		encap_family;
404 
405 	__u32			reqid;
406 
407 /* Mode: transport, tunnel etc. */
408 	__u8			mode;
409 
410 /* Sharing mode: unique, this session only, this user only etc. */
411 	__u8			share;
412 
413 /* May skip this transfomration if no SA is found */
414 	__u8			optional;
415 
416 /* Bit mask of algos allowed for acquisition */
417 	__u32			aalgos;
418 	__u32			ealgos;
419 	__u32			calgos;
420 };
421 
422 #define XFRM_MAX_DEPTH		6
423 
424 struct xfrm_policy
425 {
426 	struct xfrm_policy	*next;
427 	struct hlist_node	bydst;
428 	struct hlist_node	byidx;
429 
430 	/* This lock only affects elements except for entry. */
431 	rwlock_t		lock;
432 	atomic_t		refcnt;
433 	struct timer_list	timer;
434 
435 	u32			priority;
436 	u32			index;
437 	struct xfrm_selector	selector;
438 	struct xfrm_lifetime_cfg lft;
439 	struct xfrm_lifetime_cur curlft;
440 	struct dst_entry       *bundles;
441 	u16			family;
442 	u8			type;
443 	u8			action;
444 	u8			flags;
445 	u8			dead;
446 	u8			xfrm_nr;
447 	/* XXX 1 byte hole, try to pack */
448 	struct xfrm_sec_ctx	*security;
449 	struct xfrm_tmpl       	xfrm_vec[XFRM_MAX_DEPTH];
450 };
451 
452 struct xfrm_migrate {
453 	xfrm_address_t		old_daddr;
454 	xfrm_address_t		old_saddr;
455 	xfrm_address_t		new_daddr;
456 	xfrm_address_t		new_saddr;
457 	u8			proto;
458 	u8			mode;
459 	u16			reserved;
460 	u32			reqid;
461 	u16			old_family;
462 	u16			new_family;
463 };
464 
465 #define XFRM_KM_TIMEOUT                30
466 /* which seqno */
467 #define XFRM_REPLAY_SEQ		1
468 #define XFRM_REPLAY_OSEQ	2
469 #define XFRM_REPLAY_SEQ_MASK	3
470 /* what happened */
471 #define XFRM_REPLAY_UPDATE	XFRM_AE_CR
472 #define XFRM_REPLAY_TIMEOUT	XFRM_AE_CE
473 
474 /* default aevent timeout in units of 100ms */
475 #define XFRM_AE_ETIME			10
476 /* Async Event timer multiplier */
477 #define XFRM_AE_ETH_M			10
478 /* default seq threshold size */
479 #define XFRM_AE_SEQT_SIZE		2
480 
481 struct xfrm_mgr
482 {
483 	struct list_head	list;
484 	char			*id;
485 	int			(*notify)(struct xfrm_state *x, struct km_event *c);
486 	int			(*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp, int dir);
487 	struct xfrm_policy	*(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
488 	int			(*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
489 	int			(*notify_policy)(struct xfrm_policy *x, int dir, struct km_event *c);
490 	int			(*report)(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
491 	int			(*migrate)(struct xfrm_selector *sel, u8 dir, u8 type, struct xfrm_migrate *m, int num_bundles);
492 };
493 
494 extern int xfrm_register_km(struct xfrm_mgr *km);
495 extern int xfrm_unregister_km(struct xfrm_mgr *km);
496 
497 extern unsigned int xfrm_policy_count[XFRM_POLICY_MAX*2];
498 
499 /*
500  * This structure is used for the duration where packets are being
501  * transformed by IPsec.  As soon as the packet leaves IPsec the
502  * area beyond the generic IP part may be overwritten.
503  */
504 struct xfrm_skb_cb {
505 	union {
506 		struct inet_skb_parm h4;
507 		struct inet6_skb_parm h6;
508         } header;
509 
510         /* Sequence number for replay protection. */
511         u64 seq;
512 };
513 
514 #define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0]))
515 
516 /*
517  * This structure is used by the afinfo prepare_input/prepare_output functions
518  * to transmit header information to the mode input/output functions.
519  */
520 struct xfrm_mode_skb_cb {
521 	union {
522 		struct inet_skb_parm h4;
523 		struct inet6_skb_parm h6;
524 	} header;
525 
526 	/* Copied from header for IPv4, always set to zero and DF for IPv6. */
527 	__be16 id;
528 	__be16 frag_off;
529 
530 	/* TOS for IPv4, class for IPv6. */
531 	u8 tos;
532 
533 	/* TTL for IPv4, hop limitfor IPv6. */
534 	u8 ttl;
535 
536 	/* Protocol for IPv4, NH for IPv6. */
537 	u8 protocol;
538 
539 	/* Used by IPv6 only, zero for IPv4. */
540 	u8 flow_lbl[3];
541 };
542 
543 #define XFRM_MODE_SKB_CB(__skb) ((struct xfrm_mode_skb_cb *)&((__skb)->cb[0]))
544 
545 /*
546  * This structure is used by the input processing to locate the SPI and
547  * related information.
548  */
549 struct xfrm_spi_skb_cb {
550 	union {
551 		struct inet_skb_parm h4;
552 		struct inet6_skb_parm h6;
553 	} header;
554 
555 	unsigned int daddroff;
556 	unsigned int family;
557 };
558 
559 #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
560 
561 /* Audit Information */
562 struct xfrm_audit
563 {
564 	u32	loginuid;
565 	u32	secid;
566 };
567 
568 #ifdef CONFIG_AUDITSYSCALL
569 static inline struct audit_buffer *xfrm_audit_start(const char *op)
570 {
571 	struct audit_buffer *audit_buf = NULL;
572 
573 	if (audit_enabled == 0)
574 		return NULL;
575 	audit_buf = audit_log_start(current->audit_context, GFP_ATOMIC,
576 				    AUDIT_MAC_IPSEC_EVENT);
577 	if (audit_buf == NULL)
578 		return NULL;
579 	audit_log_format(audit_buf, "op=%s", op);
580 	return audit_buf;
581 }
582 
583 static inline void xfrm_audit_helper_usrinfo(u32 auid, u32 secid,
584 					     struct audit_buffer *audit_buf)
585 {
586 	char *secctx;
587 	u32 secctx_len;
588 
589 	audit_log_format(audit_buf, " auid=%u", auid);
590 	if (secid != 0 &&
591 	    security_secid_to_secctx(secid, &secctx, &secctx_len) == 0) {
592 		audit_log_format(audit_buf, " subj=%s", secctx);
593 		security_release_secctx(secctx, secctx_len);
594 	} else
595 		audit_log_task_context(audit_buf);
596 }
597 
598 extern void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
599 				  u32 auid, u32 secid);
600 extern void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
601 				  u32 auid, u32 secid);
602 extern void xfrm_audit_state_add(struct xfrm_state *x, int result,
603 				 u32 auid, u32 secid);
604 extern void xfrm_audit_state_delete(struct xfrm_state *x, int result,
605 				    u32 auid, u32 secid);
606 extern void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
607 					     struct sk_buff *skb);
608 extern void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
609 extern void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
610 				      __be32 net_spi, __be32 net_seq);
611 extern void xfrm_audit_state_icvfail(struct xfrm_state *x,
612 				     struct sk_buff *skb, u8 proto);
613 #else
614 #define xfrm_audit_policy_add(x, r, a, s)	do { ; } while (0)
615 #define xfrm_audit_policy_delete(x, r, a, s)	do { ; } while (0)
616 #define xfrm_audit_state_add(x, r, a, s)	do { ; } while (0)
617 #define xfrm_audit_state_delete(x, r, a, s)	do { ; } while (0)
618 #define xfrm_audit_state_replay_overflow(x, s)	do { ; } while (0)
619 #define xfrm_audit_state_notfound_simple(s, f)	do { ; } while (0)
620 #define xfrm_audit_state_notfound(s, f, sp, sq)	do { ; } while (0)
621 #define xfrm_audit_state_icvfail(x, s, p)	do { ; } while (0)
622 #endif /* CONFIG_AUDITSYSCALL */
623 
624 static inline void xfrm_pol_hold(struct xfrm_policy *policy)
625 {
626 	if (likely(policy != NULL))
627 		atomic_inc(&policy->refcnt);
628 }
629 
630 extern void xfrm_policy_destroy(struct xfrm_policy *policy);
631 
632 static inline void xfrm_pol_put(struct xfrm_policy *policy)
633 {
634 	if (atomic_dec_and_test(&policy->refcnt))
635 		xfrm_policy_destroy(policy);
636 }
637 
638 #ifdef CONFIG_XFRM_SUB_POLICY
639 static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
640 {
641 	int i;
642 	for (i = npols - 1; i >= 0; --i)
643 		xfrm_pol_put(pols[i]);
644 }
645 #else
646 static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
647 {
648 	xfrm_pol_put(pols[0]);
649 }
650 #endif
651 
652 extern void __xfrm_state_destroy(struct xfrm_state *);
653 
654 static inline void __xfrm_state_put(struct xfrm_state *x)
655 {
656 	atomic_dec(&x->refcnt);
657 }
658 
659 static inline void xfrm_state_put(struct xfrm_state *x)
660 {
661 	if (atomic_dec_and_test(&x->refcnt))
662 		__xfrm_state_destroy(x);
663 }
664 
665 static inline void xfrm_state_hold(struct xfrm_state *x)
666 {
667 	atomic_inc(&x->refcnt);
668 }
669 
670 static __inline__ int addr_match(void *token1, void *token2, int prefixlen)
671 {
672 	__be32 *a1 = token1;
673 	__be32 *a2 = token2;
674 	int pdw;
675 	int pbi;
676 
677 	pdw = prefixlen >> 5;	  /* num of whole __u32 in prefix */
678 	pbi = prefixlen &  0x1f;  /* num of bits in incomplete u32 in prefix */
679 
680 	if (pdw)
681 		if (memcmp(a1, a2, pdw << 2))
682 			return 0;
683 
684 	if (pbi) {
685 		__be32 mask;
686 
687 		mask = htonl((0xffffffff) << (32 - pbi));
688 
689 		if ((a1[pdw] ^ a2[pdw]) & mask)
690 			return 0;
691 	}
692 
693 	return 1;
694 }
695 
696 static __inline__
697 __be16 xfrm_flowi_sport(struct flowi *fl)
698 {
699 	__be16 port;
700 	switch(fl->proto) {
701 	case IPPROTO_TCP:
702 	case IPPROTO_UDP:
703 	case IPPROTO_UDPLITE:
704 	case IPPROTO_SCTP:
705 		port = fl->fl_ip_sport;
706 		break;
707 	case IPPROTO_ICMP:
708 	case IPPROTO_ICMPV6:
709 		port = htons(fl->fl_icmp_type);
710 		break;
711 	case IPPROTO_MH:
712 		port = htons(fl->fl_mh_type);
713 		break;
714 	default:
715 		port = 0;	/*XXX*/
716 	}
717 	return port;
718 }
719 
720 static __inline__
721 __be16 xfrm_flowi_dport(struct flowi *fl)
722 {
723 	__be16 port;
724 	switch(fl->proto) {
725 	case IPPROTO_TCP:
726 	case IPPROTO_UDP:
727 	case IPPROTO_UDPLITE:
728 	case IPPROTO_SCTP:
729 		port = fl->fl_ip_dport;
730 		break;
731 	case IPPROTO_ICMP:
732 	case IPPROTO_ICMPV6:
733 		port = htons(fl->fl_icmp_code);
734 		break;
735 	default:
736 		port = 0;	/*XXX*/
737 	}
738 	return port;
739 }
740 
741 extern int xfrm_selector_match(struct xfrm_selector *sel, struct flowi *fl,
742 			       unsigned short family);
743 
744 #ifdef CONFIG_SECURITY_NETWORK_XFRM
745 /*	If neither has a context --> match
746  * 	Otherwise, both must have a context and the sids, doi, alg must match
747  */
748 static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
749 {
750 	return ((!s1 && !s2) ||
751 		(s1 && s2 &&
752 		 (s1->ctx_sid == s2->ctx_sid) &&
753 		 (s1->ctx_doi == s2->ctx_doi) &&
754 		 (s1->ctx_alg == s2->ctx_alg)));
755 }
756 #else
757 static inline int xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
758 {
759 	return 1;
760 }
761 #endif
762 
763 /* A struct encoding bundle of transformations to apply to some set of flow.
764  *
765  * dst->child points to the next element of bundle.
766  * dst->xfrm  points to an instanse of transformer.
767  *
768  * Due to unfortunate limitations of current routing cache, which we
769  * have no time to fix, it mirrors struct rtable and bound to the same
770  * routing key, including saddr,daddr. However, we can have many of
771  * bundles differing by session id. All the bundles grow from a parent
772  * policy rule.
773  */
774 struct xfrm_dst
775 {
776 	union {
777 		struct dst_entry	dst;
778 		struct rtable		rt;
779 		struct rt6_info		rt6;
780 	} u;
781 	struct dst_entry *route;
782 #ifdef CONFIG_XFRM_SUB_POLICY
783 	struct flowi *origin;
784 	struct xfrm_selector *partner;
785 #endif
786 	u32 genid;
787 	u32 route_mtu_cached;
788 	u32 child_mtu_cached;
789 	u32 route_cookie;
790 	u32 path_cookie;
791 };
792 
793 static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
794 {
795 	dst_release(xdst->route);
796 	if (likely(xdst->u.dst.xfrm))
797 		xfrm_state_put(xdst->u.dst.xfrm);
798 #ifdef CONFIG_XFRM_SUB_POLICY
799 	kfree(xdst->origin);
800 	xdst->origin = NULL;
801 	kfree(xdst->partner);
802 	xdst->partner = NULL;
803 #endif
804 }
805 
806 extern void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
807 
808 struct sec_path
809 {
810 	atomic_t		refcnt;
811 	int			len;
812 	struct xfrm_state	*xvec[XFRM_MAX_DEPTH];
813 };
814 
815 static inline struct sec_path *
816 secpath_get(struct sec_path *sp)
817 {
818 	if (sp)
819 		atomic_inc(&sp->refcnt);
820 	return sp;
821 }
822 
823 extern void __secpath_destroy(struct sec_path *sp);
824 
825 static inline void
826 secpath_put(struct sec_path *sp)
827 {
828 	if (sp && atomic_dec_and_test(&sp->refcnt))
829 		__secpath_destroy(sp);
830 }
831 
832 extern struct sec_path *secpath_dup(struct sec_path *src);
833 
834 static inline void
835 secpath_reset(struct sk_buff *skb)
836 {
837 #ifdef CONFIG_XFRM
838 	secpath_put(skb->sp);
839 	skb->sp = NULL;
840 #endif
841 }
842 
843 static inline int
844 xfrm_addr_any(xfrm_address_t *addr, unsigned short family)
845 {
846 	switch (family) {
847 	case AF_INET:
848 		return addr->a4 == 0;
849 	case AF_INET6:
850 		return ipv6_addr_any((struct in6_addr *)&addr->a6);
851 	}
852 	return 0;
853 }
854 
855 static inline int
856 __xfrm4_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
857 {
858 	return	(tmpl->saddr.a4 &&
859 		 tmpl->saddr.a4 != x->props.saddr.a4);
860 }
861 
862 static inline int
863 __xfrm6_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x)
864 {
865 	return	(!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
866 		 ipv6_addr_cmp((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
867 }
868 
869 static inline int
870 xfrm_state_addr_cmp(struct xfrm_tmpl *tmpl, struct xfrm_state *x, unsigned short family)
871 {
872 	switch (family) {
873 	case AF_INET:
874 		return __xfrm4_state_addr_cmp(tmpl, x);
875 	case AF_INET6:
876 		return __xfrm6_state_addr_cmp(tmpl, x);
877 	}
878 	return !0;
879 }
880 
881 #ifdef CONFIG_XFRM
882 extern int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb, unsigned short family);
883 
884 static inline int __xfrm_policy_check2(struct sock *sk, int dir,
885 				       struct sk_buff *skb,
886 				       unsigned int family, int reverse)
887 {
888 	int ndir = dir | (reverse ? XFRM_POLICY_MASK + 1 : 0);
889 
890 	if (sk && sk->sk_policy[XFRM_POLICY_IN])
891 		return __xfrm_policy_check(sk, ndir, skb, family);
892 
893 	return	(!xfrm_policy_count[dir] && !skb->sp) ||
894 		(skb->dst->flags & DST_NOPOLICY) ||
895 		__xfrm_policy_check(sk, ndir, skb, family);
896 }
897 
898 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
899 {
900 	return __xfrm_policy_check2(sk, dir, skb, family, 0);
901 }
902 
903 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
904 {
905 	return xfrm_policy_check(sk, dir, skb, AF_INET);
906 }
907 
908 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
909 {
910 	return xfrm_policy_check(sk, dir, skb, AF_INET6);
911 }
912 
913 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
914 					     struct sk_buff *skb)
915 {
916 	return __xfrm_policy_check2(sk, dir, skb, AF_INET, 1);
917 }
918 
919 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
920 					     struct sk_buff *skb)
921 {
922 	return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
923 }
924 
925 extern int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
926 				 unsigned int family, int reverse);
927 
928 static inline int xfrm_decode_session(struct sk_buff *skb, struct flowi *fl,
929 				      unsigned int family)
930 {
931 	return __xfrm_decode_session(skb, fl, family, 0);
932 }
933 
934 static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
935 					      struct flowi *fl,
936 					      unsigned int family)
937 {
938 	return __xfrm_decode_session(skb, fl, family, 1);
939 }
940 
941 extern int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
942 
943 static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
944 {
945 	return	!xfrm_policy_count[XFRM_POLICY_OUT] ||
946 		(skb->dst->flags & DST_NOXFRM) ||
947 		__xfrm_route_forward(skb, family);
948 }
949 
950 static inline int xfrm4_route_forward(struct sk_buff *skb)
951 {
952 	return xfrm_route_forward(skb, AF_INET);
953 }
954 
955 static inline int xfrm6_route_forward(struct sk_buff *skb)
956 {
957 	return xfrm_route_forward(skb, AF_INET6);
958 }
959 
960 extern int __xfrm_sk_clone_policy(struct sock *sk);
961 
962 static inline int xfrm_sk_clone_policy(struct sock *sk)
963 {
964 	if (unlikely(sk->sk_policy[0] || sk->sk_policy[1]))
965 		return __xfrm_sk_clone_policy(sk);
966 	return 0;
967 }
968 
969 extern int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
970 
971 static inline void xfrm_sk_free_policy(struct sock *sk)
972 {
973 	if (unlikely(sk->sk_policy[0] != NULL)) {
974 		xfrm_policy_delete(sk->sk_policy[0], XFRM_POLICY_MAX);
975 		sk->sk_policy[0] = NULL;
976 	}
977 	if (unlikely(sk->sk_policy[1] != NULL)) {
978 		xfrm_policy_delete(sk->sk_policy[1], XFRM_POLICY_MAX+1);
979 		sk->sk_policy[1] = NULL;
980 	}
981 }
982 
983 #else
984 
985 static inline void xfrm_sk_free_policy(struct sock *sk) {}
986 static inline int xfrm_sk_clone_policy(struct sock *sk) { return 0; }
987 static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
988 static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
989 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
990 {
991 	return 1;
992 }
993 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
994 {
995 	return 1;
996 }
997 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
998 {
999 	return 1;
1000 }
1001 static inline int xfrm_decode_session_reverse(struct sk_buff *skb,
1002 					      struct flowi *fl,
1003 					      unsigned int family)
1004 {
1005 	return -ENOSYS;
1006 }
1007 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1008 					     struct sk_buff *skb)
1009 {
1010 	return 1;
1011 }
1012 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1013 					     struct sk_buff *skb)
1014 {
1015 	return 1;
1016 }
1017 #endif
1018 
1019 static __inline__
1020 xfrm_address_t *xfrm_flowi_daddr(struct flowi *fl, unsigned short family)
1021 {
1022 	switch (family){
1023 	case AF_INET:
1024 		return (xfrm_address_t *)&fl->fl4_dst;
1025 	case AF_INET6:
1026 		return (xfrm_address_t *)&fl->fl6_dst;
1027 	}
1028 	return NULL;
1029 }
1030 
1031 static __inline__
1032 xfrm_address_t *xfrm_flowi_saddr(struct flowi *fl, unsigned short family)
1033 {
1034 	switch (family){
1035 	case AF_INET:
1036 		return (xfrm_address_t *)&fl->fl4_src;
1037 	case AF_INET6:
1038 		return (xfrm_address_t *)&fl->fl6_src;
1039 	}
1040 	return NULL;
1041 }
1042 
1043 static __inline__ int
1044 __xfrm4_state_addr_check(struct xfrm_state *x,
1045 			 xfrm_address_t *daddr, xfrm_address_t *saddr)
1046 {
1047 	if (daddr->a4 == x->id.daddr.a4 &&
1048 	    (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
1049 		return 1;
1050 	return 0;
1051 }
1052 
1053 static __inline__ int
1054 __xfrm6_state_addr_check(struct xfrm_state *x,
1055 			 xfrm_address_t *daddr, xfrm_address_t *saddr)
1056 {
1057 	if (!ipv6_addr_cmp((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
1058 	    (!ipv6_addr_cmp((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr)||
1059 	     ipv6_addr_any((struct in6_addr *)saddr) ||
1060 	     ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
1061 		return 1;
1062 	return 0;
1063 }
1064 
1065 static __inline__ int
1066 xfrm_state_addr_check(struct xfrm_state *x,
1067 		      xfrm_address_t *daddr, xfrm_address_t *saddr,
1068 		      unsigned short family)
1069 {
1070 	switch (family) {
1071 	case AF_INET:
1072 		return __xfrm4_state_addr_check(x, daddr, saddr);
1073 	case AF_INET6:
1074 		return __xfrm6_state_addr_check(x, daddr, saddr);
1075 	}
1076 	return 0;
1077 }
1078 
1079 static __inline__ int
1080 xfrm_state_addr_flow_check(struct xfrm_state *x, struct flowi *fl,
1081 			   unsigned short family)
1082 {
1083 	switch (family) {
1084 	case AF_INET:
1085 		return __xfrm4_state_addr_check(x,
1086 						(xfrm_address_t *)&fl->fl4_dst,
1087 						(xfrm_address_t *)&fl->fl4_src);
1088 	case AF_INET6:
1089 		return __xfrm6_state_addr_check(x,
1090 						(xfrm_address_t *)&fl->fl6_dst,
1091 						(xfrm_address_t *)&fl->fl6_src);
1092 	}
1093 	return 0;
1094 }
1095 
1096 static inline int xfrm_state_kern(struct xfrm_state *x)
1097 {
1098 	return atomic_read(&x->tunnel_users);
1099 }
1100 
1101 static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
1102 {
1103 	return (!userproto || proto == userproto ||
1104 		(userproto == IPSEC_PROTO_ANY && (proto == IPPROTO_AH ||
1105 						  proto == IPPROTO_ESP ||
1106 						  proto == IPPROTO_COMP)));
1107 }
1108 
1109 /*
1110  * xfrm algorithm information
1111  */
1112 struct xfrm_algo_aead_info {
1113 	u16 icv_truncbits;
1114 };
1115 
1116 struct xfrm_algo_auth_info {
1117 	u16 icv_truncbits;
1118 	u16 icv_fullbits;
1119 };
1120 
1121 struct xfrm_algo_encr_info {
1122 	u16 blockbits;
1123 	u16 defkeybits;
1124 };
1125 
1126 struct xfrm_algo_comp_info {
1127 	u16 threshold;
1128 };
1129 
1130 struct xfrm_algo_desc {
1131 	char *name;
1132 	char *compat;
1133 	u8 available:1;
1134 	union {
1135 		struct xfrm_algo_aead_info aead;
1136 		struct xfrm_algo_auth_info auth;
1137 		struct xfrm_algo_encr_info encr;
1138 		struct xfrm_algo_comp_info comp;
1139 	} uinfo;
1140 	struct sadb_alg desc;
1141 };
1142 
1143 /* XFRM tunnel handlers.  */
1144 struct xfrm_tunnel {
1145 	int (*handler)(struct sk_buff *skb);
1146 	int (*err_handler)(struct sk_buff *skb, __u32 info);
1147 
1148 	struct xfrm_tunnel *next;
1149 	int priority;
1150 };
1151 
1152 struct xfrm6_tunnel {
1153 	int (*handler)(struct sk_buff *skb);
1154 	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
1155 			   int type, int code, int offset, __be32 info);
1156 	struct xfrm6_tunnel *next;
1157 	int priority;
1158 };
1159 
1160 extern void xfrm_init(void);
1161 extern void xfrm4_init(void);
1162 extern void xfrm_state_init(void);
1163 extern void xfrm4_state_init(void);
1164 #ifdef CONFIG_XFRM
1165 extern int xfrm6_init(void);
1166 extern void xfrm6_fini(void);
1167 extern int xfrm6_state_init(void);
1168 extern void xfrm6_state_fini(void);
1169 #else
1170 static inline int xfrm6_init(void)
1171 {
1172 	return 0;
1173 }
1174 static inline void xfrm6_fini(void)
1175 {
1176 	;
1177 }
1178 #endif
1179 
1180 #ifdef CONFIG_XFRM_STATISTICS
1181 extern int xfrm_proc_init(void);
1182 #endif
1183 
1184 extern int xfrm_state_walk(u8 proto, int (*func)(struct xfrm_state *, int, void*), void *);
1185 extern struct xfrm_state *xfrm_state_alloc(void);
1186 extern struct xfrm_state *xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr,
1187 					  struct flowi *fl, struct xfrm_tmpl *tmpl,
1188 					  struct xfrm_policy *pol, int *err,
1189 					  unsigned short family);
1190 extern struct xfrm_state * xfrm_stateonly_find(xfrm_address_t *daddr,
1191 					       xfrm_address_t *saddr,
1192 					       unsigned short family,
1193 					       u8 mode, u8 proto, u32 reqid);
1194 extern int xfrm_state_check_expire(struct xfrm_state *x);
1195 extern void xfrm_state_insert(struct xfrm_state *x);
1196 extern int xfrm_state_add(struct xfrm_state *x);
1197 extern int xfrm_state_update(struct xfrm_state *x);
1198 extern struct xfrm_state *xfrm_state_lookup(xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family);
1199 extern struct xfrm_state *xfrm_state_lookup_byaddr(xfrm_address_t *daddr, xfrm_address_t *saddr, u8 proto, unsigned short family);
1200 #ifdef CONFIG_XFRM_SUB_POLICY
1201 extern int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
1202 			  int n, unsigned short family);
1203 extern int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
1204 			   int n, unsigned short family);
1205 #else
1206 static inline int xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src,
1207 				 int n, unsigned short family)
1208 {
1209 	return -ENOSYS;
1210 }
1211 
1212 static inline int xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src,
1213 				  int n, unsigned short family)
1214 {
1215 	return -ENOSYS;
1216 }
1217 #endif
1218 
1219 struct xfrmk_sadinfo {
1220 	u32 sadhcnt; /* current hash bkts */
1221 	u32 sadhmcnt; /* max allowed hash bkts */
1222 	u32 sadcnt; /* current running count */
1223 };
1224 
1225 struct xfrmk_spdinfo {
1226 	u32 incnt;
1227 	u32 outcnt;
1228 	u32 fwdcnt;
1229 	u32 inscnt;
1230 	u32 outscnt;
1231 	u32 fwdscnt;
1232 	u32 spdhcnt;
1233 	u32 spdhmcnt;
1234 };
1235 
1236 extern struct xfrm_state *xfrm_find_acq_byseq(u32 seq);
1237 extern int xfrm_state_delete(struct xfrm_state *x);
1238 extern int xfrm_state_flush(u8 proto, struct xfrm_audit *audit_info);
1239 extern void xfrm_sad_getinfo(struct xfrmk_sadinfo *si);
1240 extern void xfrm_spd_getinfo(struct xfrmk_spdinfo *si);
1241 extern int xfrm_replay_check(struct xfrm_state *x,
1242 			     struct sk_buff *skb, __be32 seq);
1243 extern void xfrm_replay_advance(struct xfrm_state *x, __be32 seq);
1244 extern void xfrm_replay_notify(struct xfrm_state *x, int event);
1245 extern int xfrm_state_mtu(struct xfrm_state *x, int mtu);
1246 extern int xfrm_init_state(struct xfrm_state *x);
1247 extern int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb);
1248 extern int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi,
1249 		      int encap_type);
1250 extern int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
1251 extern int xfrm_output_resume(struct sk_buff *skb, int err);
1252 extern int xfrm_output(struct sk_buff *skb);
1253 extern int xfrm4_extract_header(struct sk_buff *skb);
1254 extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1255 extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
1256 			   int encap_type);
1257 extern int xfrm4_transport_finish(struct sk_buff *skb, int async);
1258 extern int xfrm4_rcv(struct sk_buff *skb);
1259 
1260 static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
1261 {
1262 	return xfrm4_rcv_encap(skb, nexthdr, spi, 0);
1263 }
1264 
1265 extern int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1266 extern int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
1267 extern int xfrm4_output(struct sk_buff *skb);
1268 extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
1269 extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
1270 extern int xfrm6_extract_header(struct sk_buff *skb);
1271 extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
1272 extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
1273 extern int xfrm6_transport_finish(struct sk_buff *skb, int async);
1274 extern int xfrm6_rcv(struct sk_buff *skb);
1275 extern int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
1276 			    xfrm_address_t *saddr, u8 proto);
1277 extern int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
1278 extern int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
1279 extern __be32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr);
1280 extern void xfrm6_tunnel_free_spi(xfrm_address_t *saddr);
1281 extern __be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr);
1282 extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb);
1283 extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
1284 extern int xfrm6_output(struct sk_buff *skb);
1285 extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
1286 				 u8 **prevhdr);
1287 
1288 #ifdef CONFIG_XFRM
1289 extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
1290 extern int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen);
1291 #else
1292 static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
1293 {
1294  	return -ENOPROTOOPT;
1295 }
1296 
1297 static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
1298 {
1299  	/* should not happen */
1300  	kfree_skb(skb);
1301 	return 0;
1302 }
1303 #endif
1304 
1305 struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp);
1306 extern int xfrm_policy_walk(u8 type, int (*func)(struct xfrm_policy *, int, int, void*), void *);
1307 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
1308 struct xfrm_policy *xfrm_policy_bysel_ctx(u8 type, int dir,
1309 					  struct xfrm_selector *sel,
1310 					  struct xfrm_sec_ctx *ctx, int delete,
1311 					  int *err);
1312 struct xfrm_policy *xfrm_policy_byid(u8, int dir, u32 id, int delete, int *err);
1313 int xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info);
1314 u32 xfrm_get_acqseq(void);
1315 extern int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
1316 struct xfrm_state * xfrm_find_acq(u8 mode, u32 reqid, u8 proto,
1317 				  xfrm_address_t *daddr, xfrm_address_t *saddr,
1318 				  int create, unsigned short family);
1319 extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
1320 extern int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *xdst,
1321 			  struct flowi *fl, int family, int strict);
1322 
1323 #ifdef CONFIG_XFRM_MIGRATE
1324 extern int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1325 		      struct xfrm_migrate *m, int num_bundles);
1326 extern struct xfrm_state * xfrm_migrate_state_find(struct xfrm_migrate *m);
1327 extern struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x,
1328 					      struct xfrm_migrate *m);
1329 extern int xfrm_migrate(struct xfrm_selector *sel, u8 dir, u8 type,
1330 			struct xfrm_migrate *m, int num_bundles);
1331 #endif
1332 
1333 extern wait_queue_head_t km_waitq;
1334 extern int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
1335 extern void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 pid);
1336 extern int km_report(u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
1337 
1338 extern void xfrm_input_init(void);
1339 extern int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
1340 
1341 extern void xfrm_probe_algs(void);
1342 extern int xfrm_count_auth_supported(void);
1343 extern int xfrm_count_enc_supported(void);
1344 extern struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
1345 extern struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
1346 extern struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
1347 extern struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
1348 extern struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
1349 extern struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe);
1350 extern struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe);
1351 extern struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe);
1352 extern struct xfrm_algo_desc *xfrm_aead_get_byname(char *name, int icv_len,
1353 						   int probe);
1354 
1355 struct hash_desc;
1356 struct scatterlist;
1357 typedef int (icv_update_fn_t)(struct hash_desc *, struct scatterlist *,
1358 			      unsigned int);
1359 
1360 extern int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *tfm,
1361 			int offset, int len, icv_update_fn_t icv_update);
1362 
1363 static inline int xfrm_addr_cmp(xfrm_address_t *a, xfrm_address_t *b,
1364 				int family)
1365 {
1366 	switch (family) {
1367 	default:
1368 	case AF_INET:
1369 		return (__force __u32)a->a4 - (__force __u32)b->a4;
1370 	case AF_INET6:
1371 		return ipv6_addr_cmp((struct in6_addr *)a,
1372 				     (struct in6_addr *)b);
1373 	}
1374 }
1375 
1376 static inline int xfrm_policy_id2dir(u32 index)
1377 {
1378 	return index & 7;
1379 }
1380 
1381 static inline int xfrm_aevent_is_on(void)
1382 {
1383 	struct sock *nlsk;
1384 	int ret = 0;
1385 
1386 	rcu_read_lock();
1387 	nlsk = rcu_dereference(xfrm_nl);
1388 	if (nlsk)
1389 		ret = netlink_has_listeners(nlsk, XFRMNLGRP_AEVENTS);
1390 	rcu_read_unlock();
1391 	return ret;
1392 }
1393 
1394 static inline int xfrm_alg_len(struct xfrm_algo *alg)
1395 {
1396 	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
1397 }
1398 
1399 #ifdef CONFIG_XFRM_MIGRATE
1400 static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
1401 {
1402 	return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
1403 }
1404 
1405 static inline void xfrm_states_put(struct xfrm_state **states, int n)
1406 {
1407 	int i;
1408 	for (i = 0; i < n; i++)
1409 		xfrm_state_put(*(states + i));
1410 }
1411 
1412 static inline void xfrm_states_delete(struct xfrm_state **states, int n)
1413 {
1414 	int i;
1415 	for (i = 0; i < n; i++)
1416 		xfrm_state_delete(*(states + i));
1417 }
1418 #endif
1419 
1420 static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
1421 {
1422 	return skb->sp->xvec[skb->sp->len - 1];
1423 }
1424 
1425 #endif	/* _NET_XFRM_H */
1426