xref: /linux/net/xfrm/xfrm_state.c (revision 05b8673963c492fe36533e99a4a3c6661ca09ed0)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * xfrm_state.c
4  *
5  * Changes:
6  *	Mitsuru KANDA @USAGI
7  * 	Kazunori MIYAZAWA @USAGI
8  * 	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9  * 		IPv6 support
10  * 	YOSHIFUJI Hideaki @USAGI
11  * 		Split up af-specific functions
12  *	Derek Atkins <derek@ihtfp.com>
13  *		Add UDP Encapsulation
14  *
15  */
16 
17 #include <linux/compat.h>
18 #include <linux/workqueue.h>
19 #include <net/xfrm.h>
20 #include <linux/pfkeyv2.h>
21 #include <linux/ipsec.h>
22 #include <linux/module.h>
23 #include <linux/cache.h>
24 #include <linux/audit.h>
25 #include <linux/uaccess.h>
26 #include <linux/ktime.h>
27 #include <linux/slab.h>
28 #include <linux/interrupt.h>
29 #include <linux/kernel.h>
30 
31 #include <crypto/aead.h>
32 
33 #include "xfrm_hash.h"
34 
35 #define xfrm_state_deref_prot(table, net) \
36 	rcu_dereference_protected((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
37 #define xfrm_state_deref_check(table, net) \
38 	rcu_dereference_check((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
39 
40 static void xfrm_state_gc_task(struct work_struct *work);
41 
42 /* Each xfrm_state may be linked to four hash tables:
43    1. byspi: (spi,daddr,proto), to find an SA by SPI. (input,ctl)
44    2. bydst: (daddr,saddr,reqid), to find what SAs exist for a given
45       destination/tunnel endpoint. (output)
46    3. bysrc: (saddr,daddr), for source-address lookups.
47    4. byseq: km.seq, to find larval SAs by ACQUIRE sequence number. */
48 
49 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
50 static struct kmem_cache *xfrm_state_cache __ro_after_init;
51 
52 static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
53 static HLIST_HEAD(xfrm_state_gc_list);
54 static HLIST_HEAD(xfrm_state_dev_gc_list);
55 
56 static inline bool xfrm_state_hold_rcu(struct xfrm_state *x)
57 {
58 	return refcount_inc_not_zero(&x->refcnt);
59 }
60 
61 static inline unsigned int xfrm_dst_hash(struct net *net,
62 					 const xfrm_address_t *daddr,
63 					 const xfrm_address_t *saddr,
64 					 u32 reqid,
65 					 unsigned short family)
66 {
67 	lockdep_assert_held(&net->xfrm.xfrm_state_lock);
68 
69 	return __xfrm_dst_hash(daddr, saddr, reqid, family, net->xfrm.state_hmask);
70 }
71 
72 static inline unsigned int xfrm_src_hash(struct net *net,
73 					 const xfrm_address_t *daddr,
74 					 const xfrm_address_t *saddr,
75 					 unsigned short family)
76 {
77 	lockdep_assert_held(&net->xfrm.xfrm_state_lock);
78 
79 	return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask);
80 }
81 
82 static inline unsigned int
83 xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
84 	      __be32 spi, u8 proto, unsigned short family)
85 {
86 	lockdep_assert_held(&net->xfrm.xfrm_state_lock);
87 
88 	return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
89 }
90 
91 static unsigned int xfrm_seq_hash(struct net *net, u32 seq)
92 {
93 	lockdep_assert_held(&net->xfrm.xfrm_state_lock);
94 
95 	return __xfrm_seq_hash(seq, net->xfrm.state_hmask);
96 }
97 
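/*
 * Chain insertion helper.  Hardware (packet offload) states are kept at
 * the head of each hash chain; a software state is inserted in front of
 * the first existing software entry, i.e. after any HW states.  Lookups
 * rely on this ordering: a packet-offload lookup can stop at the first
 * non-HW entry it meets, and a software lookup simply skips HW entries.
 */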
98 #define XFRM_STATE_INSERT(by, _n, _h, _type)                               \
99 	{                                                                  \
100 		struct xfrm_state *_x = NULL;                              \
101 									   \
102 		if (_type != XFRM_DEV_OFFLOAD_PACKET) {                    \
103 			hlist_for_each_entry_rcu(_x, _h, by) {             \
104 				if (_x->xso.type == XFRM_DEV_OFFLOAD_PACKET) \
105 					continue;                          \
106 				break;                                     \
107 			}                                                  \
108 		}                                                          \
109 									   \
110 		if (!_x || _x->xso.type == XFRM_DEV_OFFLOAD_PACKET)        \
111 			/* SAD is empty or consists of HW SAs only */      \
112 			hlist_add_head_rcu(_n, _h);                        \
113 		else                                                       \
114 			hlist_add_before_rcu(_n, &_x->by);                 \
115 	}
116 
117 static void xfrm_hash_transfer(struct hlist_head *list,
118 			       struct hlist_head *ndsttable,
119 			       struct hlist_head *nsrctable,
120 			       struct hlist_head *nspitable,
121 			       struct hlist_head *nseqtable,
122 			       unsigned int nhashmask)
123 {
124 	struct hlist_node *tmp;
125 	struct xfrm_state *x;
126 
127 	hlist_for_each_entry_safe(x, tmp, list, bydst) {
128 		unsigned int h;
129 
130 		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
131 				    x->props.reqid, x->props.family,
132 				    nhashmask);
133 		XFRM_STATE_INSERT(bydst, &x->bydst, ndsttable + h, x->xso.type);
134 
135 		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
136 				    x->props.family,
137 				    nhashmask);
138 		XFRM_STATE_INSERT(bysrc, &x->bysrc, nsrctable + h, x->xso.type);
139 
140 		if (x->id.spi) {
141 			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
142 					    x->id.proto, x->props.family,
143 					    nhashmask);
144 			XFRM_STATE_INSERT(byspi, &x->byspi, nspitable + h,
145 					  x->xso.type);
146 		}
147 
148 		if (x->km.seq) {
149 			h = __xfrm_seq_hash(x->km.seq, nhashmask);
150 			XFRM_STATE_INSERT(byseq, &x->byseq, nseqtable + h,
151 					  x->xso.type);
152 		}
153 	}
154 }
155 
156 static unsigned long xfrm_hash_new_size(unsigned int state_hmask)
157 {
158 	return ((state_hmask + 1) << 1) * sizeof(struct hlist_head);
159 }
160 
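/*
 * Worker that doubles the state hash tables.  The new tables are
 * populated under xfrm_state_lock inside a seqcount write section,
 * published with rcu_assign_pointer(), and the old tables are only
 * freed after synchronize_rcu(), so lockless readers never see a
 * stale bucket.
 */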
161 static void xfrm_hash_resize(struct work_struct *work)
162 {
163 	struct net *net = container_of(work, struct net, xfrm.state_hash_work);
164 	struct hlist_head *ndst, *nsrc, *nspi, *nseq, *odst, *osrc, *ospi, *oseq;
165 	unsigned long nsize, osize;
166 	unsigned int nhashmask, ohashmask;
167 	int i;
168 
169 	nsize = xfrm_hash_new_size(net->xfrm.state_hmask);
170 	ndst = xfrm_hash_alloc(nsize);
171 	if (!ndst)
172 		return;
173 	nsrc = xfrm_hash_alloc(nsize);
174 	if (!nsrc) {
175 		xfrm_hash_free(ndst, nsize);
176 		return;
177 	}
178 	nspi = xfrm_hash_alloc(nsize);
179 	if (!nspi) {
180 		xfrm_hash_free(ndst, nsize);
181 		xfrm_hash_free(nsrc, nsize);
182 		return;
183 	}
184 	nseq = xfrm_hash_alloc(nsize);
185 	if (!nseq) {
186 		xfrm_hash_free(ndst, nsize);
187 		xfrm_hash_free(nsrc, nsize);
188 		xfrm_hash_free(nspi, nsize);
189 		return;
190 	}
191 
192 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
193 	write_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
194 
195 	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
196 	odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
197 	for (i = net->xfrm.state_hmask; i >= 0; i--)
198 		xfrm_hash_transfer(odst + i, ndst, nsrc, nspi, nseq, nhashmask);
199 
200 	osrc = xfrm_state_deref_prot(net->xfrm.state_bysrc, net);
201 	ospi = xfrm_state_deref_prot(net->xfrm.state_byspi, net);
202 	oseq = xfrm_state_deref_prot(net->xfrm.state_byseq, net);
203 	ohashmask = net->xfrm.state_hmask;
204 
205 	rcu_assign_pointer(net->xfrm.state_bydst, ndst);
206 	rcu_assign_pointer(net->xfrm.state_bysrc, nsrc);
207 	rcu_assign_pointer(net->xfrm.state_byspi, nspi);
208 	rcu_assign_pointer(net->xfrm.state_byseq, nseq);
209 	net->xfrm.state_hmask = nhashmask;
210 
211 	write_seqcount_end(&net->xfrm.xfrm_state_hash_generation);
212 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
213 
214 	osize = (ohashmask + 1) * sizeof(struct hlist_head);
215 
216 	synchronize_rcu();
217 
218 	xfrm_hash_free(odst, osize);
219 	xfrm_hash_free(osrc, osize);
220 	xfrm_hash_free(ospi, osize);
221 	xfrm_hash_free(oseq, osize);
222 }
223 
224 static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
225 static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO];
226 
227 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
228 static DEFINE_SPINLOCK(xfrm_state_dev_gc_lock);
229 
230 int __xfrm_state_delete(struct xfrm_state *x);
231 
232 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
233 static bool km_is_alive(const struct km_event *c);
234 void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
235 
236 int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
237 {
238 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
239 	int err = 0;
240 
241 	if (!afinfo)
242 		return -EAFNOSUPPORT;
243 
244 #define X(afi, T, name) do {			\
245 		WARN_ON((afi)->type_ ## name);	\
246 		(afi)->type_ ## name = (T);	\
247 	} while (0)
248 
249 	switch (type->proto) {
250 	case IPPROTO_COMP:
251 		X(afinfo, type, comp);
252 		break;
253 	case IPPROTO_AH:
254 		X(afinfo, type, ah);
255 		break;
256 	case IPPROTO_ESP:
257 		X(afinfo, type, esp);
258 		break;
259 	case IPPROTO_IPIP:
260 		X(afinfo, type, ipip);
261 		break;
262 	case IPPROTO_DSTOPTS:
263 		X(afinfo, type, dstopts);
264 		break;
265 	case IPPROTO_ROUTING:
266 		X(afinfo, type, routing);
267 		break;
268 	case IPPROTO_IPV6:
269 		X(afinfo, type, ipip6);
270 		break;
271 	default:
272 		WARN_ON(1);
273 		err = -EPROTONOSUPPORT;
274 		break;
275 	}
276 #undef X
277 	rcu_read_unlock();
278 	return err;
279 }
280 EXPORT_SYMBOL(xfrm_register_type);
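/*
 * Typical usage (sketch only; field values elided): an IPsec protocol
 * module registers its xfrm_type once at module init, e.g.
 *
 *	static const struct xfrm_type esp_type = {
 *		.owner	= THIS_MODULE,
 *		.proto	= IPPROTO_ESP,
 *		...
 *	};
 *
 *	err = xfrm_register_type(&esp_type, AF_INET);
 *
 * and calls xfrm_unregister_type(&esp_type, AF_INET) on exit.
 */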
281 
282 void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
283 {
284 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
285 
286 	if (unlikely(afinfo == NULL))
287 		return;
288 
289 #define X(afi, T, name) do {				\
290 		WARN_ON((afi)->type_ ## name != (T));	\
291 		(afi)->type_ ## name = NULL;		\
292 	} while (0)
293 
294 	switch (type->proto) {
295 	case IPPROTO_COMP:
296 		X(afinfo, type, comp);
297 		break;
298 	case IPPROTO_AH:
299 		X(afinfo, type, ah);
300 		break;
301 	case IPPROTO_ESP:
302 		X(afinfo, type, esp);
303 		break;
304 	case IPPROTO_IPIP:
305 		X(afinfo, type, ipip);
306 		break;
307 	case IPPROTO_DSTOPTS:
308 		X(afinfo, type, dstopts);
309 		break;
310 	case IPPROTO_ROUTING:
311 		X(afinfo, type, routing);
312 		break;
313 	case IPPROTO_IPV6:
314 		X(afinfo, type, ipip6);
315 		break;
316 	default:
317 		WARN_ON(1);
318 		break;
319 	}
320 #undef X
321 	rcu_read_unlock();
322 }
323 EXPORT_SYMBOL(xfrm_unregister_type);
324 
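/*
 * Look up the xfrm_type for (proto, family), taking a module reference.
 * If the type is not registered, try once to load it via the
 * "xfrm-type-<family>-<proto>" module alias and retry.  Callers release
 * the reference with xfrm_put_type().
 */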
325 static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
326 {
327 	const struct xfrm_type *type = NULL;
328 	struct xfrm_state_afinfo *afinfo;
329 	int modload_attempted = 0;
330 
331 retry:
332 	afinfo = xfrm_state_get_afinfo(family);
333 	if (unlikely(afinfo == NULL))
334 		return NULL;
335 
336 	switch (proto) {
337 	case IPPROTO_COMP:
338 		type = afinfo->type_comp;
339 		break;
340 	case IPPROTO_AH:
341 		type = afinfo->type_ah;
342 		break;
343 	case IPPROTO_ESP:
344 		type = afinfo->type_esp;
345 		break;
346 	case IPPROTO_IPIP:
347 		type = afinfo->type_ipip;
348 		break;
349 	case IPPROTO_DSTOPTS:
350 		type = afinfo->type_dstopts;
351 		break;
352 	case IPPROTO_ROUTING:
353 		type = afinfo->type_routing;
354 		break;
355 	case IPPROTO_IPV6:
356 		type = afinfo->type_ipip6;
357 		break;
358 	default:
359 		break;
360 	}
361 
362 	if (unlikely(type && !try_module_get(type->owner)))
363 		type = NULL;
364 
365 	rcu_read_unlock();
366 
367 	if (!type && !modload_attempted) {
368 		request_module("xfrm-type-%d-%d", family, proto);
369 		modload_attempted = 1;
370 		goto retry;
371 	}
372 
373 	return type;
374 }
375 
376 static void xfrm_put_type(const struct xfrm_type *type)
377 {
378 	module_put(type->owner);
379 }
380 
381 int xfrm_register_type_offload(const struct xfrm_type_offload *type,
382 			       unsigned short family)
383 {
384 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
385 	int err = 0;
386 
387 	if (unlikely(afinfo == NULL))
388 		return -EAFNOSUPPORT;
389 
390 	switch (type->proto) {
391 	case IPPROTO_ESP:
392 		WARN_ON(afinfo->type_offload_esp);
393 		afinfo->type_offload_esp = type;
394 		break;
395 	default:
396 		WARN_ON(1);
397 		err = -EPROTONOSUPPORT;
398 		break;
399 	}
400 
401 	rcu_read_unlock();
402 	return err;
403 }
404 EXPORT_SYMBOL(xfrm_register_type_offload);
405 
406 void xfrm_unregister_type_offload(const struct xfrm_type_offload *type,
407 				  unsigned short family)
408 {
409 	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
410 
411 	if (unlikely(afinfo == NULL))
412 		return;
413 
414 	switch (type->proto) {
415 	case IPPROTO_ESP:
416 		WARN_ON(afinfo->type_offload_esp != type);
417 		afinfo->type_offload_esp = NULL;
418 		break;
419 	default:
420 		WARN_ON(1);
421 		break;
422 	}
423 	rcu_read_unlock();
424 }
425 EXPORT_SYMBOL(xfrm_unregister_type_offload);
426 
427 void xfrm_set_type_offload(struct xfrm_state *x, bool try_load)
428 {
429 	const struct xfrm_type_offload *type = NULL;
430 	struct xfrm_state_afinfo *afinfo;
431 
432 retry:
433 	afinfo = xfrm_state_get_afinfo(x->props.family);
434 	if (unlikely(afinfo == NULL))
435 		goto out;
436 
437 	switch (x->id.proto) {
438 	case IPPROTO_ESP:
439 		type = afinfo->type_offload_esp;
440 		break;
441 	default:
442 		break;
443 	}
444 
445 	if ((type && !try_module_get(type->owner)))
446 		type = NULL;
447 
448 	rcu_read_unlock();
449 
450 	if (!type && try_load) {
451 		request_module("xfrm-offload-%d-%d", x->props.family,
452 			       x->id.proto);
453 		try_load = false;
454 		goto retry;
455 	}
456 
457 out:
458 	x->type_offload = type;
459 }
460 EXPORT_SYMBOL(xfrm_set_type_offload);
461 
462 static const struct xfrm_mode xfrm4_mode_map[XFRM_MODE_MAX] = {
463 	[XFRM_MODE_BEET] = {
464 		.encap = XFRM_MODE_BEET,
465 		.flags = XFRM_MODE_FLAG_TUNNEL,
466 		.family = AF_INET,
467 	},
468 	[XFRM_MODE_TRANSPORT] = {
469 		.encap = XFRM_MODE_TRANSPORT,
470 		.family = AF_INET,
471 	},
472 	[XFRM_MODE_TUNNEL] = {
473 		.encap = XFRM_MODE_TUNNEL,
474 		.flags = XFRM_MODE_FLAG_TUNNEL,
475 		.family = AF_INET,
476 	},
477 	[XFRM_MODE_IPTFS] = {
478 		.encap = XFRM_MODE_IPTFS,
479 		.flags = XFRM_MODE_FLAG_TUNNEL,
480 		.family = AF_INET,
481 	},
482 };
483 
484 static const struct xfrm_mode xfrm6_mode_map[XFRM_MODE_MAX] = {
485 	[XFRM_MODE_BEET] = {
486 		.encap = XFRM_MODE_BEET,
487 		.flags = XFRM_MODE_FLAG_TUNNEL,
488 		.family = AF_INET6,
489 	},
490 	[XFRM_MODE_ROUTEOPTIMIZATION] = {
491 		.encap = XFRM_MODE_ROUTEOPTIMIZATION,
492 		.family = AF_INET6,
493 	},
494 	[XFRM_MODE_TRANSPORT] = {
495 		.encap = XFRM_MODE_TRANSPORT,
496 		.family = AF_INET6,
497 	},
498 	[XFRM_MODE_TUNNEL] = {
499 		.encap = XFRM_MODE_TUNNEL,
500 		.flags = XFRM_MODE_FLAG_TUNNEL,
501 		.family = AF_INET6,
502 	},
503 	[XFRM_MODE_IPTFS] = {
504 		.encap = XFRM_MODE_IPTFS,
505 		.flags = XFRM_MODE_FLAG_TUNNEL,
506 		.family = AF_INET6,
507 	},
508 };
509 
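/*
 * Return the static mode descriptor for (encap, family), or NULL if the
 * mode is not defined for that family (map entries left zeroed have
 * .family == 0 and never match).
 */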
510 static const struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
511 {
512 	const struct xfrm_mode *mode;
513 
514 	if (unlikely(encap >= XFRM_MODE_MAX))
515 		return NULL;
516 
517 	switch (family) {
518 	case AF_INET:
519 		mode = &xfrm4_mode_map[encap];
520 		if (mode->family == family)
521 			return mode;
522 		break;
523 	case AF_INET6:
524 		mode = &xfrm6_mode_map[encap];
525 		if (mode->family == family)
526 			return mode;
527 		break;
528 	default:
529 		break;
530 	}
531 
532 	return NULL;
533 }
534 
535 static const struct xfrm_mode_cbs  __rcu *xfrm_mode_cbs_map[XFRM_MODE_MAX];
536 static DEFINE_SPINLOCK(xfrm_mode_cbs_map_lock);
537 
538 int xfrm_register_mode_cbs(u8 mode, const struct xfrm_mode_cbs *mode_cbs)
539 {
540 	if (mode >= XFRM_MODE_MAX)
541 		return -EINVAL;
542 
543 	spin_lock_bh(&xfrm_mode_cbs_map_lock);
544 	rcu_assign_pointer(xfrm_mode_cbs_map[mode], mode_cbs);
545 	spin_unlock_bh(&xfrm_mode_cbs_map_lock);
546 
547 	return 0;
548 }
549 EXPORT_SYMBOL(xfrm_register_mode_cbs);
550 
551 void xfrm_unregister_mode_cbs(u8 mode)
552 {
553 	if (mode >= XFRM_MODE_MAX)
554 		return;
555 
556 	spin_lock_bh(&xfrm_mode_cbs_map_lock);
557 	RCU_INIT_POINTER(xfrm_mode_cbs_map[mode], NULL);
558 	spin_unlock_bh(&xfrm_mode_cbs_map_lock);
559 	synchronize_rcu();
560 }
561 EXPORT_SYMBOL(xfrm_unregister_mode_cbs);
562 
563 static const struct xfrm_mode_cbs *xfrm_get_mode_cbs(u8 mode)
564 {
565 	const struct xfrm_mode_cbs *cbs;
566 	bool try_load = true;
567 
568 	if (mode >= XFRM_MODE_MAX)
569 		return NULL;
570 
571 retry:
572 	rcu_read_lock();
573 
574 	cbs = rcu_dereference(xfrm_mode_cbs_map[mode]);
575 	if (cbs && !try_module_get(cbs->owner))
576 		cbs = NULL;
577 
578 	rcu_read_unlock();
579 
580 	if (mode == XFRM_MODE_IPTFS && !cbs && try_load) {
581 		request_module("xfrm-iptfs");
582 		try_load = false;
583 		goto retry;
584 	}
585 
586 	return cbs;
587 }
588 
589 void xfrm_state_free(struct xfrm_state *x)
590 {
591 	kmem_cache_free(xfrm_state_cache, x);
592 }
593 EXPORT_SYMBOL(xfrm_state_free);
594 
595 static void xfrm_state_delete_tunnel(struct xfrm_state *x);
596 static void xfrm_state_gc_destroy(struct xfrm_state *x)
597 {
598 	if (x->mode_cbs && x->mode_cbs->destroy_state)
599 		x->mode_cbs->destroy_state(x);
600 	hrtimer_cancel(&x->mtimer);
601 	timer_delete_sync(&x->rtimer);
602 	kfree_sensitive(x->aead);
603 	kfree_sensitive(x->aalg);
604 	kfree_sensitive(x->ealg);
605 	kfree(x->calg);
606 	kfree(x->encap);
607 	kfree(x->coaddr);
608 	kfree(x->replay_esn);
609 	kfree(x->preplay_esn);
610 	xfrm_unset_type_offload(x);
611 	xfrm_state_delete_tunnel(x);
612 	if (x->type) {
613 		x->type->destructor(x);
614 		xfrm_put_type(x->type);
615 	}
616 	if (x->xfrag.page)
617 		put_page(x->xfrag.page);
618 	xfrm_dev_state_free(x);
619 	security_xfrm_state_free(x);
620 	xfrm_state_free(x);
621 }
622 
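/*
 * Deferred destruction: __xfrm_state_destroy() only queues the state on
 * xfrm_state_gc_list; this worker waits for an RCU grace period and then
 * frees everything, so lockless hash walkers never touch freed memory.
 */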
623 static void xfrm_state_gc_task(struct work_struct *work)
624 {
625 	struct xfrm_state *x;
626 	struct hlist_node *tmp;
627 	struct hlist_head gc_list;
628 
629 	spin_lock_bh(&xfrm_state_gc_lock);
630 	hlist_move_list(&xfrm_state_gc_list, &gc_list);
631 	spin_unlock_bh(&xfrm_state_gc_lock);
632 
633 	synchronize_rcu();
634 
635 	hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
636 		xfrm_state_gc_destroy(x);
637 }
638 
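/*
 * Lifetime timer.  Soft limits only warn the key manager via
 * km_state_expired(x, 0, 0) and mark the state dying so it can be
 * rekeyed; hard limits (or an already-expired state) delete the state
 * and send a hard expire notification.  The timer re-arms itself for
 * the nearest remaining deadline.
 */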
639 static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
640 {
641 	struct xfrm_state *x = container_of(me, struct xfrm_state, mtimer);
642 	enum hrtimer_restart ret = HRTIMER_NORESTART;
643 	time64_t now = ktime_get_real_seconds();
644 	time64_t next = TIME64_MAX;
645 	int warn = 0;
646 	int err = 0;
647 
648 	spin_lock(&x->lock);
649 	xfrm_dev_state_update_stats(x);
650 
651 	if (x->km.state == XFRM_STATE_DEAD)
652 		goto out;
653 	if (x->km.state == XFRM_STATE_EXPIRED)
654 		goto expired;
655 	if (x->lft.hard_add_expires_seconds) {
656 		time64_t tmo = x->lft.hard_add_expires_seconds +
657 			x->curlft.add_time - now;
658 		if (tmo <= 0) {
659 			if (x->xflags & XFRM_SOFT_EXPIRE) {
660 				/* Entering hard expire without a soft expire first?!
661 				 * Setting a new expiry date could trigger this.
662 				 * Workaround: fix up x->curlft.add_time as below:
663 				 */
664 				x->curlft.add_time = now - x->saved_tmo - 1;
665 				tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
666 			} else
667 				goto expired;
668 		}
669 		if (tmo < next)
670 			next = tmo;
671 	}
672 	if (x->lft.hard_use_expires_seconds) {
673 		time64_t tmo = x->lft.hard_use_expires_seconds +
674 			(READ_ONCE(x->curlft.use_time) ? : now) - now;
675 		if (tmo <= 0)
676 			goto expired;
677 		if (tmo < next)
678 			next = tmo;
679 	}
680 	if (x->km.dying)
681 		goto resched;
682 	if (x->lft.soft_add_expires_seconds) {
683 		time64_t tmo = x->lft.soft_add_expires_seconds +
684 			x->curlft.add_time - now;
685 		if (tmo <= 0) {
686 			warn = 1;
687 			x->xflags &= ~XFRM_SOFT_EXPIRE;
688 		} else if (tmo < next) {
689 			next = tmo;
690 			x->xflags |= XFRM_SOFT_EXPIRE;
691 			x->saved_tmo = tmo;
692 		}
693 	}
694 	if (x->lft.soft_use_expires_seconds) {
695 		time64_t tmo = x->lft.soft_use_expires_seconds +
696 			(READ_ONCE(x->curlft.use_time) ? : now) - now;
697 		if (tmo <= 0)
698 			warn = 1;
699 		else if (tmo < next)
700 			next = tmo;
701 	}
702 
703 	x->km.dying = warn;
704 	if (warn)
705 		km_state_expired(x, 0, 0);
706 resched:
707 	if (next != TIME64_MAX) {
708 		hrtimer_forward_now(&x->mtimer, ktime_set(next, 0));
709 		ret = HRTIMER_RESTART;
710 	}
711 
712 	goto out;
713 
714 expired:
715 	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0)
716 		x->km.state = XFRM_STATE_EXPIRED;
717 
718 	err = __xfrm_state_delete(x);
719 	if (!err)
720 		km_state_expired(x, 1, 0);
721 
722 	xfrm_audit_state_delete(x, err ? 0 : 1, true);
723 
724 out:
725 	spin_unlock(&x->lock);
726 	return ret;
727 }
728 
729 static void xfrm_replay_timer_handler(struct timer_list *t);
730 
731 struct xfrm_state *xfrm_state_alloc(struct net *net)
732 {
733 	struct xfrm_state *x;
734 
735 	x = kmem_cache_zalloc(xfrm_state_cache, GFP_ATOMIC);
736 
737 	if (x) {
738 		write_pnet(&x->xs_net, net);
739 		refcount_set(&x->refcnt, 1);
740 		atomic_set(&x->tunnel_users, 0);
741 		INIT_LIST_HEAD(&x->km.all);
742 		INIT_HLIST_NODE(&x->state_cache);
743 		INIT_HLIST_NODE(&x->bydst);
744 		INIT_HLIST_NODE(&x->bysrc);
745 		INIT_HLIST_NODE(&x->byspi);
746 		INIT_HLIST_NODE(&x->byseq);
747 		hrtimer_setup(&x->mtimer, xfrm_timer_handler, CLOCK_BOOTTIME,
748 			      HRTIMER_MODE_ABS_SOFT);
749 		timer_setup(&x->rtimer, xfrm_replay_timer_handler, 0);
750 		x->curlft.add_time = ktime_get_real_seconds();
751 		x->lft.soft_byte_limit = XFRM_INF;
752 		x->lft.soft_packet_limit = XFRM_INF;
753 		x->lft.hard_byte_limit = XFRM_INF;
754 		x->lft.hard_packet_limit = XFRM_INF;
755 		x->replay_maxage = 0;
756 		x->replay_maxdiff = 0;
757 		x->pcpu_num = UINT_MAX;
758 		spin_lock_init(&x->lock);
759 		x->mode_data = NULL;
760 	}
761 	return x;
762 }
763 EXPORT_SYMBOL(xfrm_state_alloc);
764 
765 #ifdef CONFIG_XFRM_OFFLOAD
766 void xfrm_dev_state_delete(struct xfrm_state *x)
767 {
768 	struct xfrm_dev_offload *xso = &x->xso;
769 	struct net_device *dev = READ_ONCE(xso->dev);
770 
771 	if (dev) {
772 		dev->xfrmdev_ops->xdo_dev_state_delete(dev, x);
773 		spin_lock_bh(&xfrm_state_dev_gc_lock);
774 		hlist_add_head(&x->dev_gclist, &xfrm_state_dev_gc_list);
775 		spin_unlock_bh(&xfrm_state_dev_gc_lock);
776 	}
777 }
778 EXPORT_SYMBOL_GPL(xfrm_dev_state_delete);
779 
780 void xfrm_dev_state_free(struct xfrm_state *x)
781 {
782 	struct xfrm_dev_offload *xso = &x->xso;
783 	struct net_device *dev = READ_ONCE(xso->dev);
784 
785 	if (dev && dev->xfrmdev_ops) {
786 		spin_lock_bh(&xfrm_state_dev_gc_lock);
787 		if (!hlist_unhashed(&x->dev_gclist))
788 			hlist_del(&x->dev_gclist);
789 		spin_unlock_bh(&xfrm_state_dev_gc_lock);
790 
791 		if (dev->xfrmdev_ops->xdo_dev_state_free)
792 			dev->xfrmdev_ops->xdo_dev_state_free(dev, x);
793 		WRITE_ONCE(xso->dev, NULL);
794 		xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
795 		netdev_put(dev, &xso->dev_tracker);
796 	}
797 }
798 #endif
799 
800 void __xfrm_state_destroy(struct xfrm_state *x)
801 {
802 	WARN_ON(x->km.state != XFRM_STATE_DEAD);
803 
804 	spin_lock_bh(&xfrm_state_gc_lock);
805 	hlist_add_head(&x->gclist, &xfrm_state_gc_list);
806 	spin_unlock_bh(&xfrm_state_gc_lock);
807 	schedule_work(&xfrm_state_gc_work);
808 }
809 EXPORT_SYMBOL(__xfrm_state_destroy);
810 
811 int __xfrm_state_delete(struct xfrm_state *x)
812 {
813 	struct net *net = xs_net(x);
814 	int err = -ESRCH;
815 
816 	if (x->km.state != XFRM_STATE_DEAD) {
817 		x->km.state = XFRM_STATE_DEAD;
818 
819 		spin_lock(&net->xfrm.xfrm_state_lock);
820 		list_del(&x->km.all);
821 		hlist_del_rcu(&x->bydst);
822 		hlist_del_rcu(&x->bysrc);
823 		if (x->km.seq)
824 			hlist_del_rcu(&x->byseq);
825 		if (!hlist_unhashed(&x->state_cache))
826 			hlist_del_rcu(&x->state_cache);
827 		if (!hlist_unhashed(&x->state_cache_input))
828 			hlist_del_rcu(&x->state_cache_input);
829 
830 		if (x->id.spi)
831 			hlist_del_rcu(&x->byspi);
832 		net->xfrm.state_num--;
833 		xfrm_nat_keepalive_state_updated(x);
834 		spin_unlock(&net->xfrm.xfrm_state_lock);
835 
836 		xfrm_dev_state_delete(x);
837 
838 		xfrm_state_delete_tunnel(x);
839 
840 		/* All xfrm_state objects are created by xfrm_state_alloc.
841 		 * The xfrm_state_alloc call gives a reference, and that
842 		 * is what we are dropping here.
843 		 */
844 		xfrm_state_put(x);
845 		err = 0;
846 	}
847 
848 	return err;
849 }
850 EXPORT_SYMBOL(__xfrm_state_delete);
851 
852 int xfrm_state_delete(struct xfrm_state *x)
853 {
854 	int err;
855 
856 	spin_lock_bh(&x->lock);
857 	err = __xfrm_state_delete(x);
858 	spin_unlock_bh(&x->lock);
859 
860 	return err;
861 }
862 EXPORT_SYMBOL(xfrm_state_delete);
863 
864 #ifdef CONFIG_SECURITY_NETWORK_XFRM
865 static inline int
866 xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
867 {
868 	int i, err = 0;
869 
870 	for (i = 0; i <= net->xfrm.state_hmask; i++) {
871 		struct xfrm_state *x;
872 
873 		hlist_for_each_entry(x, xfrm_state_deref_prot(net->xfrm.state_bydst, net) + i, bydst) {
874 			if (xfrm_id_proto_match(x->id.proto, proto) &&
875 			   (err = security_xfrm_state_delete(x)) != 0) {
876 				xfrm_audit_state_delete(x, 0, task_valid);
877 				return err;
878 			}
879 		}
880 	}
881 
882 	return err;
883 }
884 
885 static inline int
886 xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
887 {
888 	int i, err = 0;
889 
890 	for (i = 0; i <= net->xfrm.state_hmask; i++) {
891 		struct xfrm_state *x;
892 		struct xfrm_dev_offload *xso;
893 
894 		hlist_for_each_entry(x, xfrm_state_deref_prot(net->xfrm.state_bydst, net) + i, bydst) {
895 			xso = &x->xso;
896 
897 			if (xso->dev == dev &&
898 			   (err = security_xfrm_state_delete(x)) != 0) {
899 				xfrm_audit_state_delete(x, 0, task_valid);
900 				return err;
901 			}
902 		}
903 	}
904 
905 	return err;
906 }
907 #else
908 static inline int
909 xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
910 {
911 	return 0;
912 }
913 
914 static inline int
915 xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
916 {
917 	return 0;
918 }
919 #endif
920 
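/*
 * Flush all states matching @proto.  The hash chain may change while
 * xfrm_state_lock is dropped to call xfrm_state_delete(), so the walk
 * restarts from the bucket head after every deletion.
 */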
921 int xfrm_state_flush(struct net *net, u8 proto, bool task_valid)
922 {
923 	int i, err = 0, cnt = 0;
924 
925 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
926 	err = xfrm_state_flush_secctx_check(net, proto, task_valid);
927 	if (err)
928 		goto out;
929 
930 	err = -ESRCH;
931 	for (i = 0; i <= net->xfrm.state_hmask; i++) {
932 		struct xfrm_state *x;
933 restart:
934 		hlist_for_each_entry(x, xfrm_state_deref_prot(net->xfrm.state_bydst, net) + i, bydst) {
935 			if (!xfrm_state_kern(x) &&
936 			    xfrm_id_proto_match(x->id.proto, proto)) {
937 				xfrm_state_hold(x);
938 				spin_unlock_bh(&net->xfrm.xfrm_state_lock);
939 
940 				err = xfrm_state_delete(x);
941 				xfrm_audit_state_delete(x, err ? 0 : 1,
942 							task_valid);
943 				xfrm_state_put(x);
944 				if (!err)
945 					cnt++;
946 
947 				spin_lock_bh(&net->xfrm.xfrm_state_lock);
948 				goto restart;
949 			}
950 		}
951 	}
952 out:
953 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
954 	if (cnt)
955 		err = 0;
956 
957 	return err;
958 }
959 EXPORT_SYMBOL(xfrm_state_flush);
960 
961 int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid)
962 {
963 	struct xfrm_state *x;
964 	struct hlist_node *tmp;
965 	struct xfrm_dev_offload *xso;
966 	int i, err = 0, cnt = 0;
967 
968 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
969 	err = xfrm_dev_state_flush_secctx_check(net, dev, task_valid);
970 	if (err)
971 		goto out;
972 
973 	err = -ESRCH;
974 	for (i = 0; i <= net->xfrm.state_hmask; i++) {
975 restart:
976 		hlist_for_each_entry(x, xfrm_state_deref_prot(net->xfrm.state_bydst, net) + i, bydst) {
977 			xso = &x->xso;
978 
979 			if (!xfrm_state_kern(x) && xso->dev == dev) {
980 				xfrm_state_hold(x);
981 				spin_unlock_bh(&net->xfrm.xfrm_state_lock);
982 
983 				err = xfrm_state_delete(x);
984 				xfrm_dev_state_free(x);
985 
986 				xfrm_audit_state_delete(x, err ? 0 : 1,
987 							task_valid);
988 				xfrm_state_put(x);
989 				if (!err)
990 					cnt++;
991 
992 				spin_lock_bh(&net->xfrm.xfrm_state_lock);
993 				goto restart;
994 			}
995 		}
996 	}
997 	if (cnt)
998 		err = 0;
999 
1000 out:
1001 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1002 
1003 	spin_lock_bh(&xfrm_state_dev_gc_lock);
1004 restart_gc:
1005 	hlist_for_each_entry_safe(x, tmp, &xfrm_state_dev_gc_list, dev_gclist) {
1006 		xso = &x->xso;
1007 
1008 		if (xso->dev == dev) {
1009 			spin_unlock_bh(&xfrm_state_dev_gc_lock);
1010 			xfrm_dev_state_free(x);
1011 			spin_lock_bh(&xfrm_state_dev_gc_lock);
1012 			goto restart_gc;
1013 		}
1014 
1015 	}
1016 	spin_unlock_bh(&xfrm_state_dev_gc_lock);
1017 
1018 	xfrm_flush_gc();
1019 
1020 	return err;
1021 }
1022 EXPORT_SYMBOL(xfrm_dev_state_flush);
1023 
1024 void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
1025 {
1026 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
1027 	si->sadcnt = net->xfrm.state_num;
1028 	si->sadhcnt = net->xfrm.state_hmask + 1;
1029 	si->sadhmcnt = xfrm_state_hashmax;
1030 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1031 }
1032 EXPORT_SYMBOL(xfrm_sad_getinfo);
1033 
1034 static void
1035 __xfrm4_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
1036 {
1037 	const struct flowi4 *fl4 = &fl->u.ip4;
1038 
1039 	sel->daddr.a4 = fl4->daddr;
1040 	sel->saddr.a4 = fl4->saddr;
1041 	sel->dport = xfrm_flowi_dport(fl, &fl4->uli);
1042 	sel->dport_mask = htons(0xffff);
1043 	sel->sport = xfrm_flowi_sport(fl, &fl4->uli);
1044 	sel->sport_mask = htons(0xffff);
1045 	sel->family = AF_INET;
1046 	sel->prefixlen_d = 32;
1047 	sel->prefixlen_s = 32;
1048 	sel->proto = fl4->flowi4_proto;
1049 	sel->ifindex = fl4->flowi4_oif;
1050 }
1051 
1052 static void
1053 __xfrm6_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
1054 {
1055 	const struct flowi6 *fl6 = &fl->u.ip6;
1056 
1057 	/* Initialize a temporary selector matching only the current session. */
1058 	*(struct in6_addr *)&sel->daddr = fl6->daddr;
1059 	*(struct in6_addr *)&sel->saddr = fl6->saddr;
1060 	sel->dport = xfrm_flowi_dport(fl, &fl6->uli);
1061 	sel->dport_mask = htons(0xffff);
1062 	sel->sport = xfrm_flowi_sport(fl, &fl6->uli);
1063 	sel->sport_mask = htons(0xffff);
1064 	sel->family = AF_INET6;
1065 	sel->prefixlen_d = 128;
1066 	sel->prefixlen_s = 128;
1067 	sel->proto = fl6->flowi6_proto;
1068 	sel->ifindex = fl6->flowi6_oif;
1069 }
1070 
1071 static void
1072 xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
1073 		    const struct xfrm_tmpl *tmpl,
1074 		    const xfrm_address_t *daddr, const xfrm_address_t *saddr,
1075 		    unsigned short family)
1076 {
1077 	switch (family) {
1078 	case AF_INET:
1079 		__xfrm4_init_tempsel(&x->sel, fl);
1080 		break;
1081 	case AF_INET6:
1082 		__xfrm6_init_tempsel(&x->sel, fl);
1083 		break;
1084 	}
1085 
1086 	x->id = tmpl->id;
1087 
1088 	switch (tmpl->encap_family) {
1089 	case AF_INET:
1090 		if (x->id.daddr.a4 == 0)
1091 			x->id.daddr.a4 = daddr->a4;
1092 		x->props.saddr = tmpl->saddr;
1093 		if (x->props.saddr.a4 == 0)
1094 			x->props.saddr.a4 = saddr->a4;
1095 		break;
1096 	case AF_INET6:
1097 		if (ipv6_addr_any((struct in6_addr *)&x->id.daddr))
1098 			memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr));
1099 		memcpy(&x->props.saddr, &tmpl->saddr, sizeof(x->props.saddr));
1100 		if (ipv6_addr_any((struct in6_addr *)&x->props.saddr))
1101 			memcpy(&x->props.saddr, saddr, sizeof(x->props.saddr));
1102 		break;
1103 	}
1104 
1105 	x->props.mode = tmpl->mode;
1106 	x->props.reqid = tmpl->reqid;
1107 	x->props.family = tmpl->encap_family;
1108 }
1109 
1110 struct xfrm_hash_state_ptrs {
1111 	const struct hlist_head *bydst;
1112 	const struct hlist_head *bysrc;
1113 	const struct hlist_head *byspi;
1114 	unsigned int hmask;
1115 };
1116 
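/*
 * Snapshot the hash table pointers and mask consistently with respect
 * to a concurrent resize: retry until the generation seqcount is stable.
 */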
1117 static void xfrm_hash_ptrs_get(const struct net *net, struct xfrm_hash_state_ptrs *ptrs)
1118 {
1119 	unsigned int sequence;
1120 
1121 	do {
1122 		sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
1123 
1124 		ptrs->bydst = xfrm_state_deref_check(net->xfrm.state_bydst, net);
1125 		ptrs->bysrc = xfrm_state_deref_check(net->xfrm.state_bysrc, net);
1126 		ptrs->byspi = xfrm_state_deref_check(net->xfrm.state_byspi, net);
1127 		ptrs->hmask = net->xfrm.state_hmask;
1128 	} while (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence));
1129 }
1130 
1131 static struct xfrm_state *__xfrm_state_lookup_all(const struct xfrm_hash_state_ptrs *state_ptrs,
1132 						  u32 mark,
1133 						  const xfrm_address_t *daddr,
1134 						  __be32 spi, u8 proto,
1135 						  unsigned short family,
1136 						  struct xfrm_dev_offload *xdo)
1137 {
1138 	unsigned int h = __xfrm_spi_hash(daddr, spi, proto, family, state_ptrs->hmask);
1139 	struct xfrm_state *x;
1140 
1141 	hlist_for_each_entry_rcu(x, state_ptrs->byspi + h, byspi) {
1142 #ifdef CONFIG_XFRM_OFFLOAD
1143 		if (xdo->type == XFRM_DEV_OFFLOAD_PACKET) {
1144 			if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
1145 				/* HW states are at the head of the list;
1146 				 * no need to iterate further.
1147 				 */
1148 				break;
1149 
1150 			/* Packet offload: both policy and SA must
1151 			 * use the same device.
1152 			 */
1153 			if (xdo->dev != x->xso.dev)
1154 				continue;
1155 		} else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
1156 			/* Skip HW states during SW lookups */
1157 			continue;
1158 #endif
1159 		if (x->props.family != family ||
1160 		    x->id.spi       != spi ||
1161 		    x->id.proto     != proto ||
1162 		    !xfrm_addr_equal(&x->id.daddr, daddr, family))
1163 			continue;
1164 
1165 		if ((mark & x->mark.m) != x->mark.v)
1166 			continue;
1167 		if (!xfrm_state_hold_rcu(x))
1168 			continue;
1169 		return x;
1170 	}
1171 
1172 	return NULL;
1173 }
1174 
1175 static struct xfrm_state *__xfrm_state_lookup(const struct xfrm_hash_state_ptrs *state_ptrs,
1176 					      u32 mark,
1177 					      const xfrm_address_t *daddr,
1178 					      __be32 spi, u8 proto,
1179 					      unsigned short family)
1180 {
1181 	unsigned int h = __xfrm_spi_hash(daddr, spi, proto, family, state_ptrs->hmask);
1182 	struct xfrm_state *x;
1183 
1184 	hlist_for_each_entry_rcu(x, state_ptrs->byspi + h, byspi) {
1185 		if (x->props.family != family ||
1186 		    x->id.spi       != spi ||
1187 		    x->id.proto     != proto ||
1188 		    !xfrm_addr_equal(&x->id.daddr, daddr, family))
1189 			continue;
1190 
1191 		if ((mark & x->mark.m) != x->mark.v)
1192 			continue;
1193 		if (!xfrm_state_hold_rcu(x))
1194 			continue;
1195 		return x;
1196 	}
1197 
1198 	return NULL;
1199 }
1200 
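/*
 * Input-path SPI lookup.  A small per-CPU cache (state_cache_input) is
 * consulted first; on a miss the byspi hash is searched and a VALID
 * result is promoted into (or moved to the head of) this CPU's cache.
 */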
1201 struct xfrm_state *xfrm_input_state_lookup(struct net *net, u32 mark,
1202 					   const xfrm_address_t *daddr,
1203 					   __be32 spi, u8 proto,
1204 					   unsigned short family)
1205 {
1206 	struct xfrm_hash_state_ptrs state_ptrs;
1207 	struct hlist_head *state_cache_input;
1208 	struct xfrm_state *x = NULL;
1209 
1210 	state_cache_input = raw_cpu_ptr(net->xfrm.state_cache_input);
1211 
1212 	rcu_read_lock();
1213 	hlist_for_each_entry_rcu(x, state_cache_input, state_cache_input) {
1214 		if (x->props.family != family ||
1215 		    x->id.spi       != spi ||
1216 		    x->id.proto     != proto ||
1217 		    !xfrm_addr_equal(&x->id.daddr, daddr, family))
1218 			continue;
1219 
1220 		if ((mark & x->mark.m) != x->mark.v)
1221 			continue;
1222 		if (!xfrm_state_hold_rcu(x))
1223 			continue;
1224 		goto out;
1225 	}
1226 
1227 	xfrm_hash_ptrs_get(net, &state_ptrs);
1228 
1229 	x = __xfrm_state_lookup(&state_ptrs, mark, daddr, spi, proto, family);
1230 
1231 	if (x && x->km.state == XFRM_STATE_VALID) {
1232 		spin_lock_bh(&net->xfrm.xfrm_state_lock);
1233 		if (hlist_unhashed(&x->state_cache_input)) {
1234 			hlist_add_head_rcu(&x->state_cache_input, state_cache_input);
1235 		} else {
1236 			hlist_del_rcu(&x->state_cache_input);
1237 			hlist_add_head_rcu(&x->state_cache_input, state_cache_input);
1238 		}
1239 		spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1240 	}
1241 
1242 out:
1243 	rcu_read_unlock();
1244 	return x;
1245 }
1246 EXPORT_SYMBOL(xfrm_input_state_lookup);
1247 
1248 static struct xfrm_state *__xfrm_state_lookup_byaddr(const struct xfrm_hash_state_ptrs *state_ptrs,
1249 						     u32 mark,
1250 						     const xfrm_address_t *daddr,
1251 						     const xfrm_address_t *saddr,
1252 						     u8 proto, unsigned short family)
1253 {
1254 	unsigned int h = __xfrm_src_hash(daddr, saddr, family, state_ptrs->hmask);
1255 	struct xfrm_state *x;
1256 
1257 	hlist_for_each_entry_rcu(x, state_ptrs->bysrc + h, bysrc) {
1258 		if (x->props.family != family ||
1259 		    x->id.proto     != proto ||
1260 		    !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
1261 		    !xfrm_addr_equal(&x->props.saddr, saddr, family))
1262 			continue;
1263 
1264 		if ((mark & x->mark.m) != x->mark.v)
1265 			continue;
1266 		if (!xfrm_state_hold_rcu(x))
1267 			continue;
1268 		return x;
1269 	}
1270 
1271 	return NULL;
1272 }
1273 
1274 static inline struct xfrm_state *
1275 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
1276 {
1277 	struct xfrm_hash_state_ptrs state_ptrs;
1278 	struct net *net = xs_net(x);
1279 	u32 mark = x->mark.v & x->mark.m;
1280 
1281 	xfrm_hash_ptrs_get(net, &state_ptrs);
1282 
1283 	if (use_spi)
1284 		return __xfrm_state_lookup(&state_ptrs, mark, &x->id.daddr,
1285 					   x->id.spi, x->id.proto, family);
1286 	else
1287 		return __xfrm_state_lookup_byaddr(&state_ptrs, mark,
1288 						  &x->id.daddr,
1289 						  &x->props.saddr,
1290 						  x->id.proto, family);
1291 }
1292 
1293 static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
1294 {
1295 	if (have_hash_collision &&
1296 	    (net->xfrm.state_hmask + 1) < xfrm_state_hashmax &&
1297 	    net->xfrm.state_num > net->xfrm.state_hmask)
1298 		schedule_work(&net->xfrm.state_hash_work);
1299 }
1300 
1301 static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
1302 			       const struct flowi *fl, unsigned short family,
1303 			       struct xfrm_state **best, int *acq_in_progress,
1304 			       int *error, unsigned int pcpu_id)
1305 {
1306 	/* Resolution logic:
1307 	 * 1. There is a valid state with matching selector. Done.
1308 	 * 2. Valid state with inappropriate selector. Skip.
1309 	 *
1310 	 * Entering area of "sysdeps".
1311 	 *
1312 	 * 3. If state is not valid, selector is temporary, it selects
1313 	 *    only session which triggered previous resolution. Key
1314 	 *    manager will do something to install a state with proper
1315 	 *    selector.
1316 	 */
1317 	if (x->km.state == XFRM_STATE_VALID) {
1318 		if ((x->sel.family &&
1319 		     (x->sel.family != family ||
1320 		      !xfrm_selector_match(&x->sel, fl, family))) ||
1321 		    !security_xfrm_state_pol_flow_match(x, pol,
1322 							&fl->u.__fl_common))
1323 			return;
1324 
1325 		if (x->pcpu_num != UINT_MAX && x->pcpu_num != pcpu_id)
1326 			return;
1327 
1328 		if (!*best ||
1329 		    ((*best)->pcpu_num == UINT_MAX && x->pcpu_num == pcpu_id) ||
1330 		    (*best)->km.dying > x->km.dying ||
1331 		    ((*best)->km.dying == x->km.dying &&
1332 		     (*best)->curlft.add_time < x->curlft.add_time))
1333 			*best = x;
1334 	} else if (x->km.state == XFRM_STATE_ACQ) {
1335 		if (!*best || x->pcpu_num == pcpu_id)
1336 			*acq_in_progress = 1;
1337 	} else if (x->km.state == XFRM_STATE_ERROR ||
1338 		   x->km.state == XFRM_STATE_EXPIRED) {
1339 		if ((!x->sel.family ||
1340 		     (x->sel.family == family &&
1341 		      xfrm_selector_match(&x->sel, fl, family))) &&
1342 		    security_xfrm_state_pol_flow_match(x, pol,
1343 						       &fl->u.__fl_common))
1344 			*error = -ESRCH;
1345 	}
1346 }
1347 
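/*
 * Main output-path resolver.  Candidate order: the policy's private
 * state cache, then the bydst hash, then the same hash with a wildcard
 * source address.  If nothing usable is found and no acquire is already
 * in flight, a larval XFRM_STATE_ACQ state is created and the key
 * managers are asked to negotiate a real SA via km_query().
 */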
1348 struct xfrm_state *
1349 xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
1350 		const struct flowi *fl, struct xfrm_tmpl *tmpl,
1351 		struct xfrm_policy *pol, int *err,
1352 		unsigned short family, u32 if_id)
1353 {
1354 	static xfrm_address_t saddr_wildcard = { };
1355 	struct xfrm_hash_state_ptrs state_ptrs;
1356 	struct net *net = xp_net(pol);
1357 	unsigned int h, h_wildcard;
1358 	struct xfrm_state *x, *x0, *to_put;
1359 	int acquire_in_progress = 0;
1360 	int error = 0;
1361 	struct xfrm_state *best = NULL;
1362 	u32 mark = pol->mark.v & pol->mark.m;
1363 	unsigned short encap_family = tmpl->encap_family;
1364 	unsigned int sequence;
1365 	struct km_event c;
1366 	unsigned int pcpu_id;
1367 	bool cached = false;
1368 
1369 	/* We need the cpu id just as a lookup key;
1370 	 * we don't require it to be stable.
1371 	 */
1372 	pcpu_id = raw_smp_processor_id();
1373 
1374 	to_put = NULL;
1375 
1376 	sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
1377 
1378 	rcu_read_lock();
1379 	xfrm_hash_ptrs_get(net, &state_ptrs);
1380 
1381 	hlist_for_each_entry_rcu(x, &pol->state_cache_list, state_cache) {
1382 		if (x->props.family == encap_family &&
1383 		    x->props.reqid == tmpl->reqid &&
1384 		    (mark & x->mark.m) == x->mark.v &&
1385 		    x->if_id == if_id &&
1386 		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
1387 		    xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
1388 		    tmpl->mode == x->props.mode &&
1389 		    tmpl->id.proto == x->id.proto &&
1390 		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
1391 			xfrm_state_look_at(pol, x, fl, encap_family,
1392 					   &best, &acquire_in_progress, &error, pcpu_id);
1393 	}
1394 
1395 	if (best)
1396 		goto cached;
1397 
1398 	hlist_for_each_entry_rcu(x, &pol->state_cache_list, state_cache) {
1399 		if (x->props.family == encap_family &&
1400 		    x->props.reqid == tmpl->reqid &&
1401 		    (mark & x->mark.m) == x->mark.v &&
1402 		    x->if_id == if_id &&
1403 		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
1404 		    xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
1405 		    tmpl->mode == x->props.mode &&
1406 		    tmpl->id.proto == x->id.proto &&
1407 		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
1408 			xfrm_state_look_at(pol, x, fl, family,
1409 					   &best, &acquire_in_progress, &error, pcpu_id);
1410 	}
1411 
1412 cached:
1413 	cached = true;
1414 	if (best)
1415 		goto found;
1416 	else if (error)
1417 		best = NULL;
1418 	else if (acquire_in_progress) /* XXX: acquire_in_progress should not happen */
1419 		WARN_ON(1);
1420 
1421 	h = __xfrm_dst_hash(daddr, saddr, tmpl->reqid, encap_family, state_ptrs.hmask);
1422 	hlist_for_each_entry_rcu(x, state_ptrs.bydst + h, bydst) {
1423 #ifdef CONFIG_XFRM_OFFLOAD
1424 		if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
1425 			if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
1426 				/* HW states are at the head of the list;
1427 				 * no need to iterate further.
1428 				 */
1429 				break;
1430 
1431 		/* Packet offload: both policy and SA must
1432 		 * use the same device.
1433 		 */
1434 			if (pol->xdo.dev != x->xso.dev)
1435 				continue;
1436 		} else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
1437 			/* Skip HW states during SW lookups */
1438 			continue;
1439 #endif
1440 		if (x->props.family == encap_family &&
1441 		    x->props.reqid == tmpl->reqid &&
1442 		    (mark & x->mark.m) == x->mark.v &&
1443 		    x->if_id == if_id &&
1444 		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
1445 		    xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
1446 		    tmpl->mode == x->props.mode &&
1447 		    tmpl->id.proto == x->id.proto &&
1448 		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
1449 			xfrm_state_look_at(pol, x, fl, family,
1450 					   &best, &acquire_in_progress, &error, pcpu_id);
1451 	}
1452 	if (best || acquire_in_progress)
1453 		goto found;
1454 
1455 	h_wildcard = __xfrm_dst_hash(daddr, &saddr_wildcard, tmpl->reqid,
1456 				     encap_family, state_ptrs.hmask);
1457 	hlist_for_each_entry_rcu(x, state_ptrs.bydst + h_wildcard, bydst) {
1458 #ifdef CONFIG_XFRM_OFFLOAD
1459 		if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
1460 			if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
1461 				/* HW states are at the head of the list;
1462 				 * no need to iterate further.
1463 				 */
1464 				break;
1465 
1466 			/* Packet offload: both policy and SA must
1467 			 * use the same device.
1468 			 */
1469 			if (pol->xdo.dev != x->xso.dev)
1470 				continue;
1471 		} else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
1472 			/* Skip HW states during SW lookups */
1473 			continue;
1474 #endif
1475 		if (x->props.family == encap_family &&
1476 		    x->props.reqid == tmpl->reqid &&
1477 		    (mark & x->mark.m) == x->mark.v &&
1478 		    x->if_id == if_id &&
1479 		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
1480 		    xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
1481 		    tmpl->mode == x->props.mode &&
1482 		    tmpl->id.proto == x->id.proto &&
1483 		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
1484 			xfrm_state_look_at(pol, x, fl, family,
1485 					   &best, &acquire_in_progress, &error, pcpu_id);
1486 	}
1487 
1488 found:
1489 	if (!(pol->flags & XFRM_POLICY_CPU_ACQUIRE) ||
1490 	    (best && (best->pcpu_num == pcpu_id)))
1491 		x = best;
1492 
1493 	if (!x && !error && !acquire_in_progress) {
1494 		if (tmpl->id.spi &&
1495 		    (x0 = __xfrm_state_lookup_all(&state_ptrs, mark, daddr,
1496 						  tmpl->id.spi, tmpl->id.proto,
1497 						  encap_family,
1498 						  &pol->xdo)) != NULL) {
1499 			to_put = x0;
1500 			error = -EEXIST;
1501 			goto out;
1502 		}
1503 
1504 		c.net = net;
1505 		/* If the KMs have no listeners (yet...), avoid allocating an SA
1506 		 * for each and every packet - garbage collection might not
1507 		 * handle the flood.
1508 		 */
1509 		if (!km_is_alive(&c)) {
1510 			error = -ESRCH;
1511 			goto out;
1512 		}
1513 
1514 		x = xfrm_state_alloc(net);
1515 		if (x == NULL) {
1516 			error = -ENOMEM;
1517 			goto out;
1518 		}
1519 		/* Initialize a temporary state matching only
1520 		 * the current session. */
1521 		xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
1522 		memcpy(&x->mark, &pol->mark, sizeof(x->mark));
1523 		x->if_id = if_id;
1524 		if ((pol->flags & XFRM_POLICY_CPU_ACQUIRE) && best)
1525 			x->pcpu_num = pcpu_id;
1526 
1527 		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->flowi_secid);
1528 		if (error) {
1529 			x->km.state = XFRM_STATE_DEAD;
1530 			to_put = x;
1531 			x = NULL;
1532 			goto out;
1533 		}
1534 #ifdef CONFIG_XFRM_OFFLOAD
1535 		if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
1536 			struct xfrm_dev_offload *xdo = &pol->xdo;
1537 			struct xfrm_dev_offload *xso = &x->xso;
1538 			struct net_device *dev = xdo->dev;
1539 
1540 			xso->type = XFRM_DEV_OFFLOAD_PACKET;
1541 			xso->dir = xdo->dir;
1542 			xso->dev = dev;
1543 			xso->flags = XFRM_DEV_OFFLOAD_FLAG_ACQ;
1544 			netdev_hold(dev, &xso->dev_tracker, GFP_ATOMIC);
1545 			error = dev->xfrmdev_ops->xdo_dev_state_add(dev, x,
1546 								    NULL);
1547 			if (error) {
1548 				xso->dir = 0;
1549 				netdev_put(dev, &xso->dev_tracker);
1550 				xso->dev = NULL;
1551 				xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
1552 				x->km.state = XFRM_STATE_DEAD;
1553 				to_put = x;
1554 				x = NULL;
1555 				goto out;
1556 			}
1557 		}
1558 #endif
1559 		if (km_query(x, tmpl, pol) == 0) {
1560 			spin_lock_bh(&net->xfrm.xfrm_state_lock);
1561 			x->km.state = XFRM_STATE_ACQ;
1562 			x->dir = XFRM_SA_DIR_OUT;
1563 			list_add(&x->km.all, &net->xfrm.state_all);
1564 			h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
1565 			XFRM_STATE_INSERT(bydst, &x->bydst,
1566 					  xfrm_state_deref_prot(net->xfrm.state_bydst, net) + h,
1567 					  x->xso.type);
1568 			h = xfrm_src_hash(net, daddr, saddr, encap_family);
1569 			XFRM_STATE_INSERT(bysrc, &x->bysrc,
1570 					  xfrm_state_deref_prot(net->xfrm.state_bysrc, net) + h,
1571 					  x->xso.type);
1572 			INIT_HLIST_NODE(&x->state_cache);
1573 			if (x->id.spi) {
1574 				h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
1575 				XFRM_STATE_INSERT(byspi, &x->byspi,
1576 						  xfrm_state_deref_prot(net->xfrm.state_byspi, net) + h,
1577 						  x->xso.type);
1578 			}
1579 			if (x->km.seq) {
1580 				h = xfrm_seq_hash(net, x->km.seq);
1581 				XFRM_STATE_INSERT(byseq, &x->byseq,
1582 						  xfrm_state_deref_prot(net->xfrm.state_byseq, net) + h,
1583 						  x->xso.type);
1584 			}
1585 			x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
1586 			hrtimer_start(&x->mtimer,
1587 				      ktime_set(net->xfrm.sysctl_acq_expires, 0),
1588 				      HRTIMER_MODE_REL_SOFT);
1589 			net->xfrm.state_num++;
1590 			xfrm_hash_grow_check(net, x->bydst.next != NULL);
1591 			spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1592 		} else {
1593 #ifdef CONFIG_XFRM_OFFLOAD
1594 			struct xfrm_dev_offload *xso = &x->xso;
1595 
1596 			if (xso->type == XFRM_DEV_OFFLOAD_PACKET) {
1597 				xfrm_dev_state_delete(x);
1598 				xfrm_dev_state_free(x);
1599 			}
1600 #endif
1601 			x->km.state = XFRM_STATE_DEAD;
1602 			to_put = x;
1603 			x = NULL;
1604 			error = -ESRCH;
1605 		}
1606 
1607 		/* Use the already-installed 'fallback' SA while the
1608 		 * CPU-specific SA acquire is being handled. */
1609 		if (best)
1610 			x = best;
1611 	}
1612 out:
1613 	if (x) {
1614 		if (!xfrm_state_hold_rcu(x)) {
1615 			*err = -EAGAIN;
1616 			x = NULL;
1617 		}
1618 	} else {
1619 		*err = acquire_in_progress ? -EAGAIN : error;
1620 	}
1621 
1622 	if (x && x->km.state == XFRM_STATE_VALID && !cached &&
1623 	    (!(pol->flags & XFRM_POLICY_CPU_ACQUIRE) || x->pcpu_num == pcpu_id)) {
1624 		spin_lock_bh(&net->xfrm.xfrm_state_lock);
1625 		if (hlist_unhashed(&x->state_cache))
1626 			hlist_add_head_rcu(&x->state_cache, &pol->state_cache_list);
1627 		spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1628 	}
1629 
1630 	rcu_read_unlock();
1631 	if (to_put)
1632 		xfrm_state_put(to_put);
1633 
1634 	if (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence)) {
1635 		*err = -EAGAIN;
1636 		if (x) {
1637 			xfrm_state_put(x);
1638 			x = NULL;
1639 		}
1640 	}
1641 
1642 	return x;
1643 }
1644 
1645 struct xfrm_state *
1646 xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
1647 		    xfrm_address_t *daddr, xfrm_address_t *saddr,
1648 		    unsigned short family, u8 mode, u8 proto, u32 reqid)
1649 {
1650 	unsigned int h;
1651 	struct xfrm_state *rx = NULL, *x = NULL;
1652 
1653 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
1654 	h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
1655 	hlist_for_each_entry(x, xfrm_state_deref_prot(net->xfrm.state_bydst, net) + h, bydst) {
1656 		if (x->props.family == family &&
1657 		    x->props.reqid == reqid &&
1658 		    (mark & x->mark.m) == x->mark.v &&
1659 		    x->if_id == if_id &&
1660 		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
1661 		    xfrm_state_addr_check(x, daddr, saddr, family) &&
1662 		    mode == x->props.mode &&
1663 		    proto == x->id.proto &&
1664 		    x->km.state == XFRM_STATE_VALID) {
1665 			rx = x;
1666 			break;
1667 		}
1668 	}
1669 
1670 	if (rx)
1671 		xfrm_state_hold(rx);
1672 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1673 
1674 
1675 	return rx;
1676 }
1677 EXPORT_SYMBOL(xfrm_stateonly_find);
1678 
1679 struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
1680 					      unsigned short family)
1681 {
1682 	struct xfrm_state *x;
1683 	struct xfrm_state_walk *w;
1684 
1685 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
1686 	list_for_each_entry(w, &net->xfrm.state_all, all) {
1687 		x = container_of(w, struct xfrm_state, km);
1688 		if (x->props.family != family ||
1689 		    x->id.spi != spi)
1690 			continue;
1691 
1692 		xfrm_state_hold(x);
1693 		spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1694 		return x;
1695 	}
1696 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1697 	return NULL;
1698 }
1699 EXPORT_SYMBOL(xfrm_state_lookup_byspi);
1700 
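/* Linear walk of every byspi chain; caller must hold xfrm_state_lock. */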
1701 static struct xfrm_state *xfrm_state_lookup_spi_proto(struct net *net, __be32 spi, u8 proto)
1702 {
1703 	struct xfrm_state *x;
1704 	unsigned int i;
1705 
1706 	for (i = 0; i <= net->xfrm.state_hmask; i++) {
1707 		hlist_for_each_entry(x, xfrm_state_deref_prot(net->xfrm.state_byspi, net) + i, byspi) {
1708 			if (x->id.spi == spi && x->id.proto == proto)
1709 				return x;
1710 		}
1711 	}
1712 	return NULL;
1713 }
1714 
1715 static void __xfrm_state_insert(struct xfrm_state *x)
1716 {
1717 	struct net *net = xs_net(x);
1718 	unsigned int h;
1719 
1720 	list_add(&x->km.all, &net->xfrm.state_all);
1721 
1722 	/* Sanitize mark before store */
1723 	x->mark.v &= x->mark.m;
1724 
1725 	h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
1726 			  x->props.reqid, x->props.family);
1727 	XFRM_STATE_INSERT(bydst, &x->bydst,
1728 			  xfrm_state_deref_prot(net->xfrm.state_bydst, net) + h,
1729 			  x->xso.type);
1730 
1731 	h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family);
1732 	XFRM_STATE_INSERT(bysrc, &x->bysrc,
1733 			  xfrm_state_deref_prot(net->xfrm.state_bysrc, net) + h,
1734 			  x->xso.type);
1735 
1736 	if (x->id.spi) {
1737 		h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto,
1738 				  x->props.family);
1739 
1740 		XFRM_STATE_INSERT(byspi, &x->byspi,
1741 				  xfrm_state_deref_prot(net->xfrm.state_byspi, net) + h,
1742 				  x->xso.type);
1743 	}
1744 
1745 	if (x->km.seq) {
1746 		h = xfrm_seq_hash(net, x->km.seq);
1747 
1748 		XFRM_STATE_INSERT(byseq, &x->byseq,
1749 				  xfrm_state_deref_prot(net->xfrm.state_byseq, net) + h,
1750 				  x->xso.type);
1751 	}
1752 
1753 	hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
1754 	if (x->replay_maxage)
1755 		mod_timer(&x->rtimer, jiffies + x->replay_maxage);
1756 
1757 	net->xfrm.state_num++;
1758 
1759 	xfrm_hash_grow_check(net, x->bydst.next != NULL);
1760 	xfrm_nat_keepalive_state_updated(x);
1761 }
1762 
1763 /* net->xfrm.xfrm_state_lock is held */
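/* Bump the genid of every state the new state shadows (same addresses,
 * reqid, mark, if_id and cpu) so that cached bundles built on the old
 * states are noticed as stale and rebuilt.
 */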
1764 static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
1765 {
1766 	struct net *net = xs_net(xnew);
1767 	unsigned short family = xnew->props.family;
1768 	u32 reqid = xnew->props.reqid;
1769 	struct xfrm_state *x;
1770 	unsigned int h;
1771 	u32 mark = xnew->mark.v & xnew->mark.m;
1772 	u32 if_id = xnew->if_id;
1773 	u32 cpu_id = xnew->pcpu_num;
1774 
1775 	h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
1776 	hlist_for_each_entry(x, xfrm_state_deref_prot(net->xfrm.state_bydst, net) + h, bydst) {
1777 		if (x->props.family	== family &&
1778 		    x->props.reqid	== reqid &&
1779 		    x->if_id		== if_id &&
1780 		    x->pcpu_num		== cpu_id &&
1781 		    (mark & x->mark.m) == x->mark.v &&
1782 		    xfrm_addr_equal(&x->id.daddr, &xnew->id.daddr, family) &&
1783 		    xfrm_addr_equal(&x->props.saddr, &xnew->props.saddr, family))
1784 			x->genid++;
1785 	}
1786 }
1787 
1788 void xfrm_state_insert(struct xfrm_state *x)
1789 {
1790 	struct net *net = xs_net(x);
1791 
1792 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
1793 	__xfrm_state_bump_genids(x);
1794 	__xfrm_state_insert(x);
1795 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1796 }
1797 EXPORT_SYMBOL(xfrm_state_insert);
1798 
1799 /* net->xfrm.xfrm_state_lock is held */
1800 static struct xfrm_state *__find_acq_core(struct net *net,
1801 					  const struct xfrm_mark *m,
1802 					  unsigned short family, u8 mode,
1803 					  u32 reqid, u32 if_id, u32 pcpu_num, u8 proto,
1804 					  const xfrm_address_t *daddr,
1805 					  const xfrm_address_t *saddr,
1806 					  int create)
1807 {
1808 	unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
1809 	struct xfrm_state *x;
1810 	u32 mark = m->v & m->m;
1811 
1812 	hlist_for_each_entry(x, xfrm_state_deref_prot(net->xfrm.state_bydst, net) + h, bydst) {
1813 		if (x->props.reqid  != reqid ||
1814 		    x->props.mode   != mode ||
1815 		    x->props.family != family ||
1816 		    x->km.state     != XFRM_STATE_ACQ ||
1817 		    x->id.spi       != 0 ||
1818 		    x->id.proto	    != proto ||
1819 		    (mark & x->mark.m) != x->mark.v ||
1820 		    x->pcpu_num != pcpu_num ||
1821 		    !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
1822 		    !xfrm_addr_equal(&x->props.saddr, saddr, family))
1823 			continue;
1824 
1825 		xfrm_state_hold(x);
1826 		return x;
1827 	}
1828 
1829 	if (!create)
1830 		return NULL;
1831 
1832 	x = xfrm_state_alloc(net);
1833 	if (likely(x)) {
1834 		switch (family) {
1835 		case AF_INET:
1836 			x->sel.daddr.a4 = daddr->a4;
1837 			x->sel.saddr.a4 = saddr->a4;
1838 			x->sel.prefixlen_d = 32;
1839 			x->sel.prefixlen_s = 32;
1840 			x->props.saddr.a4 = saddr->a4;
1841 			x->id.daddr.a4 = daddr->a4;
1842 			break;
1843 
1844 		case AF_INET6:
1845 			x->sel.daddr.in6 = daddr->in6;
1846 			x->sel.saddr.in6 = saddr->in6;
1847 			x->sel.prefixlen_d = 128;
1848 			x->sel.prefixlen_s = 128;
1849 			x->props.saddr.in6 = saddr->in6;
1850 			x->id.daddr.in6 = daddr->in6;
1851 			break;
1852 		}
1853 
1854 		x->pcpu_num = pcpu_num;
1855 		x->km.state = XFRM_STATE_ACQ;
1856 		x->id.proto = proto;
1857 		x->props.family = family;
1858 		x->props.mode = mode;
1859 		x->props.reqid = reqid;
1860 		x->if_id = if_id;
1861 		x->mark.v = m->v;
1862 		x->mark.m = m->m;
1863 		x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
1864 		xfrm_state_hold(x);
1865 		hrtimer_start(&x->mtimer,
1866 			      ktime_set(net->xfrm.sysctl_acq_expires, 0),
1867 			      HRTIMER_MODE_REL_SOFT);
1868 		list_add(&x->km.all, &net->xfrm.state_all);
1869 		XFRM_STATE_INSERT(bydst, &x->bydst,
1870 				  xfrm_state_deref_prot(net->xfrm.state_bydst, net) + h,
1871 				  x->xso.type);
1872 		h = xfrm_src_hash(net, daddr, saddr, family);
1873 		XFRM_STATE_INSERT(bysrc, &x->bysrc,
1874 				  xfrm_state_deref_prot(net->xfrm.state_bysrc, net) + h,
1875 				  x->xso.type);
1876 
1877 		net->xfrm.state_num++;
1878 
1879 		xfrm_hash_grow_check(net, x->bydst.next != NULL);
1880 	}
1881 
1882 	return x;
1883 }
1884 
1885 static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num);
1886 
1887 int xfrm_state_add(struct xfrm_state *x)
1888 {
1889 	struct net *net = xs_net(x);
1890 	struct xfrm_state *x1, *to_put;
1891 	int family;
1892 	int err;
1893 	u32 mark = x->mark.v & x->mark.m;
1894 	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1895 
1896 	family = x->props.family;
1897 
1898 	to_put = NULL;
1899 
1900 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
1901 
1902 	x1 = __xfrm_state_locate(x, use_spi, family);
1903 	if (x1) {
1904 		to_put = x1;
1905 		x1 = NULL;
1906 		err = -EEXIST;
1907 		goto out;
1908 	}
1909 
1910 	if (use_spi && x->km.seq) {
1911 		x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq, x->pcpu_num);
1912 		if (x1 && ((x1->id.proto != x->id.proto) ||
1913 		    !xfrm_addr_equal(&x1->id.daddr, &x->id.daddr, family))) {
1914 			to_put = x1;
1915 			x1 = NULL;
1916 		}
1917 	}
1918 
1919 	if (use_spi && !x1)
1920 		x1 = __find_acq_core(net, &x->mark, family, x->props.mode,
1921 				     x->props.reqid, x->if_id, x->pcpu_num, x->id.proto,
1922 				     &x->id.daddr, &x->props.saddr, 0);
1923 
1924 	__xfrm_state_bump_genids(x);
1925 	__xfrm_state_insert(x);
1926 	err = 0;
1927 
1928 out:
1929 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1930 
1931 	if (x1) {
1932 		xfrm_state_delete(x1);
1933 		xfrm_state_put(x1);
1934 	}
1935 
1936 	if (to_put)
1937 		xfrm_state_put(to_put);
1938 
1939 	return err;
1940 }
1941 EXPORT_SYMBOL(xfrm_state_add);
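/*
 * The lookups above tie a fully keyed SA back to its larval ACQUIRE:
 * __xfrm_find_acq_byseq() matches on the km.seq echoed back by the key
 * manager, __find_acq_core() (with create == 0) matches on the
 * (mode, reqid, proto, addresses) tuple, and whichever XFRM_STATE_ACQ
 * entry is found is deleted and released only after the lock is dropped,
 * once the new state is already visible in the hash tables.
 */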
1942 
1943 #ifdef CONFIG_XFRM_MIGRATE
1944 static inline int clone_security(struct xfrm_state *x, struct xfrm_sec_ctx *security)
1945 {
1946 	struct xfrm_user_sec_ctx *uctx;
1947 	int size = sizeof(*uctx) + security->ctx_len;
1948 	int err;
1949 
1950 	uctx = kmalloc(size, GFP_KERNEL);
1951 	if (!uctx)
1952 		return -ENOMEM;
1953 
1954 	uctx->exttype = XFRMA_SEC_CTX;
1955 	uctx->len = size;
1956 	uctx->ctx_doi = security->ctx_doi;
1957 	uctx->ctx_alg = security->ctx_alg;
1958 	uctx->ctx_len = security->ctx_len;
1959 	memcpy(uctx + 1, security->ctx_str, security->ctx_len);
1960 	err = security_xfrm_state_alloc(x, uctx);
1961 	kfree(uctx);
1962 	if (err)
1963 		return err;
1964 
1965 	return 0;
1966 }
1967 
1968 static struct xfrm_state *xfrm_state_clone_and_setup(struct xfrm_state *orig,
1969 					   struct xfrm_encap_tmpl *encap,
1970 					   struct xfrm_migrate *m)
1971 {
1972 	struct net *net = xs_net(orig);
1973 	struct xfrm_state *x = xfrm_state_alloc(net);

1974 	if (!x)
1975 		goto out;
1976 
1977 	memcpy(&x->id, &orig->id, sizeof(x->id));
1978 	memcpy(&x->sel, &orig->sel, sizeof(x->sel));
1979 	memcpy(&x->lft, &orig->lft, sizeof(x->lft));
1980 	x->props.mode = orig->props.mode;
1981 	x->props.replay_window = orig->props.replay_window;
1982 	x->props.reqid = orig->props.reqid;
1983 	x->props.family = orig->props.family;
1984 	x->props.saddr = orig->props.saddr;
1985 
1986 	if (orig->aalg) {
1987 		x->aalg = xfrm_algo_auth_clone(orig->aalg);
1988 		if (!x->aalg)
1989 			goto error;
1990 	}
1991 	x->props.aalgo = orig->props.aalgo;
1992 
1993 	if (orig->aead) {
1994 		x->aead = xfrm_algo_aead_clone(orig->aead);
1995 		x->geniv = orig->geniv;
1996 		if (!x->aead)
1997 			goto error;
1998 	}
1999 	if (orig->ealg) {
2000 		x->ealg = xfrm_algo_clone(orig->ealg);
2001 		if (!x->ealg)
2002 			goto error;
2003 	}
2004 	x->props.ealgo = orig->props.ealgo;
2005 
2006 	if (orig->calg) {
2007 		x->calg = xfrm_algo_clone(orig->calg);
2008 		if (!x->calg)
2009 			goto error;
2010 	}
2011 	x->props.calgo = orig->props.calgo;
2012 
2013 	if (encap || orig->encap) {
2014 		if (encap)
2015 			x->encap = kmemdup(encap, sizeof(*x->encap),
2016 					GFP_KERNEL);
2017 		else
2018 			x->encap = kmemdup(orig->encap, sizeof(*x->encap),
2019 					GFP_KERNEL);
2020 
2021 		if (!x->encap)
2022 			goto error;
2023 	}
2024 
2025 	if (orig->security)
2026 		if (clone_security(x, orig->security))
2027 			goto error;
2028 
2029 	if (orig->coaddr) {
2030 		x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
2031 				    GFP_KERNEL);
2032 		if (!x->coaddr)
2033 			goto error;
2034 	}
2035 
2036 	if (orig->replay_esn) {
2037 		if (xfrm_replay_clone(x, orig))
2038 			goto error;
2039 	}
2040 
2041 	memcpy(&x->mark, &orig->mark, sizeof(x->mark));
2042 	memcpy(&x->props.smark, &orig->props.smark, sizeof(x->props.smark));
2043 
2044 	x->props.flags = orig->props.flags;
2045 	x->props.extra_flags = orig->props.extra_flags;
2046 
2047 	x->pcpu_num = orig->pcpu_num;
2048 	x->if_id = orig->if_id;
2049 	x->tfcpad = orig->tfcpad;
2050 	x->replay_maxdiff = orig->replay_maxdiff;
2051 	x->replay_maxage = orig->replay_maxage;
2052 	memcpy(&x->curlft, &orig->curlft, sizeof(x->curlft));
2053 	x->km.state = orig->km.state;
2054 	x->km.seq = orig->km.seq;
2055 	x->replay = orig->replay;
2056 	x->preplay = orig->preplay;
2057 	x->mapping_maxage = orig->mapping_maxage;
2058 	x->lastused = orig->lastused;
2059 	x->new_mapping = 0;
2060 	x->new_mapping_sport = 0;
2061 	x->dir = orig->dir;
2062 
2063 	x->mode_cbs = orig->mode_cbs;
2064 	if (x->mode_cbs && x->mode_cbs->clone_state) {
2065 		if (x->mode_cbs->clone_state(x, orig))
2066 			goto error;
2067 	}
2068 
2070 	x->props.family = m->new_family;
2071 	memcpy(&x->id.daddr, &m->new_daddr, sizeof(x->id.daddr));
2072 	memcpy(&x->props.saddr, &m->new_saddr, sizeof(x->props.saddr));
2073 
2074 	return x;
2075 
2076  error:
2077 	x->km.state = XFRM_STATE_DEAD;
2078 	xfrm_state_put(x);
2079 out:
2080 	return NULL;
2081 }
2082 
2083 struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net,
2084 						u32 if_id)
2085 {
2086 	unsigned int h;
2087 	struct xfrm_state *x = NULL;
2088 
2089 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
2090 
2091 	if (m->reqid) {
2092 		h = xfrm_dst_hash(net, &m->old_daddr, &m->old_saddr,
2093 				  m->reqid, m->old_family);
2094 		hlist_for_each_entry(x, xfrm_state_deref_prot(net->xfrm.state_bydst, net) + h, bydst) {
2095 			if (x->props.mode != m->mode ||
2096 			    x->id.proto != m->proto)
2097 				continue;
2098 			if (m->reqid && x->props.reqid != m->reqid)
2099 				continue;
2100 			if (if_id != 0 && x->if_id != if_id)
2101 				continue;
2102 			if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
2103 					     m->old_family) ||
2104 			    !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
2105 					     m->old_family))
2106 				continue;
2107 			xfrm_state_hold(x);
2108 			break;
2109 		}
2110 	} else {
2111 		h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr,
2112 				  m->old_family);
2113 		hlist_for_each_entry(x, xfrm_state_deref_prot(net->xfrm.state_bysrc, net) + h, bysrc) {
2114 			if (x->props.mode != m->mode ||
2115 			    x->id.proto != m->proto)
2116 				continue;
2117 			if (if_id != 0 && x->if_id != if_id)
2118 				continue;
2119 			if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
2120 					     m->old_family) ||
2121 			    !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
2122 					     m->old_family))
2123 				continue;
2124 			xfrm_state_hold(x);
2125 			break;
2126 		}
2127 	}
2128 
2129 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
2130 
2131 	return x;
2132 }
2133 EXPORT_SYMBOL(xfrm_migrate_state_find);
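/*
 * The two branches above differ only in which hash is walked: a non-zero
 * reqid makes the (daddr, saddr, reqid) destination hash authoritative,
 * otherwise the source hash chain is scanned.  Both demand an exact
 * mode/proto/if_id/address match before taking a reference on the state.
 */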
2134 
2135 struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
2136 				      struct xfrm_migrate *m,
2137 				      struct xfrm_encap_tmpl *encap,
2138 				      struct net *net,
2139 				      struct xfrm_user_offload *xuo,
2140 				      struct netlink_ext_ack *extack)
2141 {
2142 	struct xfrm_state *xc;
2143 
2144 	xc = xfrm_state_clone_and_setup(x, encap, m);
2145 	if (!xc)
2146 		return NULL;
2147 
2148 	if (xfrm_init_state(xc) < 0)
2149 		goto error;
2150 
2151 	/* configure the hardware if offload is requested */
2152 	if (xuo && xfrm_dev_state_add(net, xc, xuo, extack))
2153 		goto error;
2154 
2155 	/* add state */
2156 	if (xfrm_addr_equal(&x->id.daddr, &m->new_daddr, m->new_family)) {
2157 		/* care is needed when the destination address of the
2158 		   state is to be updated, as it is part of the lookup triplet */
2159 		xfrm_state_insert(xc);
2160 	} else {
2161 		if (xfrm_state_add(xc) < 0)
2162 			goto error_add;
2163 	}
2164 
2165 	return xc;
2166 error_add:
2167 	if (xuo)
2168 		xfrm_dev_state_delete(xc);
2169 error:
2170 	xc->km.state = XFRM_STATE_DEAD;
2171 	xfrm_state_put(xc);
2172 	return NULL;
2173 }
2174 EXPORT_SYMBOL(xfrm_state_migrate);
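/*
 * Migration clones the SA, rewrites its endpoints from the xfrm_migrate
 * request, revalidates it via xfrm_init_state() and optionally binds the
 * hardware offload.  A clone whose destination address is unchanged is
 * inserted unconditionally (the caller is expected to retire the old
 * entry); one whose destination changed must pass the duplicate checks
 * in xfrm_state_add(), since the (daddr, spi, proto) lookup key itself
 * is being replaced.
 */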
2175 #endif
2176 
2177 int xfrm_state_update(struct xfrm_state *x)
2178 {
2179 	struct xfrm_state *x1, *to_put;
2180 	int err;
2181 	int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
2182 	struct net *net = xs_net(x);
2183 
2184 	to_put = NULL;
2185 
2186 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
2187 	x1 = __xfrm_state_locate(x, use_spi, x->props.family);
2188 
2189 	err = -ESRCH;
2190 	if (!x1)
2191 		goto out;
2192 
2193 	if (xfrm_state_kern(x1)) {
2194 		to_put = x1;
2195 		err = -EEXIST;
2196 		goto out;
2197 	}
2198 
2199 	if (x1->km.state == XFRM_STATE_ACQ) {
2200 		if (x->dir && x1->dir != x->dir) {
2201 			to_put = x1;
2202 			goto out;
2203 		}
2204 
2205 		__xfrm_state_insert(x);
2206 		x = NULL;
2207 	} else {
2208 		if (x1->dir != x->dir) {
2209 			to_put = x1;
2210 			goto out;
2211 		}
2212 	}
2213 	err = 0;
2214 
2215 out:
2216 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
2217 
2218 	if (to_put)
2219 		xfrm_state_put(to_put);
2220 
2221 	if (err)
2222 		return err;
2223 
2224 	if (!x) {
2225 		xfrm_state_delete(x1);
2226 		xfrm_state_put(x1);
2227 		return 0;
2228 	}
2229 
2230 	err = -EINVAL;
2231 	spin_lock_bh(&x1->lock);
2232 	if (likely(x1->km.state == XFRM_STATE_VALID)) {
2233 		if (x->encap && x1->encap &&
2234 		    x->encap->encap_type == x1->encap->encap_type)
2235 			memcpy(x1->encap, x->encap, sizeof(*x1->encap));
2236 		else if (x->encap || x1->encap)
2237 			goto fail;
2238 
2239 		if (x->coaddr && x1->coaddr) {
2240 			memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
2241 		}
2242 		if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
2243 			memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
2244 		memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
2245 		x1->km.dying = 0;
2246 
2247 		hrtimer_start(&x1->mtimer, ktime_set(1, 0),
2248 			      HRTIMER_MODE_REL_SOFT);
2249 		if (READ_ONCE(x1->curlft.use_time))
2250 			xfrm_state_check_expire(x1);
2251 
2252 		if (x->props.smark.m || x->props.smark.v || x->if_id) {
2253 			spin_lock_bh(&net->xfrm.xfrm_state_lock);
2254 
2255 			if (x->props.smark.m || x->props.smark.v)
2256 				x1->props.smark = x->props.smark;
2257 
2258 			if (x->if_id)
2259 				x1->if_id = x->if_id;
2260 
2261 			__xfrm_state_bump_genids(x1);
2262 			spin_unlock_bh(&net->xfrm.xfrm_state_lock);
2263 		}
2264 
2265 		err = 0;
2266 		x->km.state = XFRM_STATE_DEAD;
2267 		xfrm_dev_state_delete(x);
2268 		__xfrm_state_put(x);
2269 	}
2270 
2271 fail:
2272 	spin_unlock_bh(&x1->lock);
2273 
2274 	xfrm_state_put(x1);
2275 
2276 	return err;
2277 }
2278 EXPORT_SYMBOL(xfrm_state_update);
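/*
 * On a successful update the caller's state object x is consumed: its
 * encap, selector, lifetime and mark/if_id settings are copied onto the
 * resident state x1, x is marked XFRM_STATE_DEAD and released, and x1's
 * timers are re-armed.  Only when x1 is still a larval ACQUIRE does x
 * itself get inserted in place of the template.
 */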
2279 
2280 int xfrm_state_check_expire(struct xfrm_state *x)
2281 {
2282 	/* All counters needed to decide whether the state has expired are
2283 	 * maintained in software for every mode except packet offload, so
2284 	 * those modes skip the device query below, saving boilerplate in drivers.
2285 	 */
2286 	if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
2287 		xfrm_dev_state_update_stats(x);
2288 
2289 	if (!READ_ONCE(x->curlft.use_time))
2290 		WRITE_ONCE(x->curlft.use_time, ktime_get_real_seconds());
2291 
2292 	if (x->curlft.bytes >= x->lft.hard_byte_limit ||
2293 	    x->curlft.packets >= x->lft.hard_packet_limit) {
2294 		x->km.state = XFRM_STATE_EXPIRED;
2295 		hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL_SOFT);
2296 		return -EINVAL;
2297 	}
2298 
2299 	if (!x->km.dying &&
2300 	    (x->curlft.bytes >= x->lft.soft_byte_limit ||
2301 	     x->curlft.packets >= x->lft.soft_packet_limit)) {
2302 		x->km.dying = 1;
2303 		km_state_expired(x, 0, 0);
2304 	}
2305 	return 0;
2306 }
2307 EXPORT_SYMBOL(xfrm_state_check_expire);
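/*
 * Worked example (illustrative figures): with lft.soft_byte_limit set to
 * 1 MB and lft.hard_byte_limit to 2 MB, the first check that sees
 * curlft.bytes cross 1 MB marks the state dying and sends a soft
 * km_state_expired() so a key manager can rekey in the background;
 * crossing 2 MB flips the state to XFRM_STATE_EXPIRED, fires the
 * management timer immediately and fails the check with -EINVAL.
 */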
2308 
2309 void xfrm_state_update_stats(struct net *net)
2310 {
2311 	struct xfrm_state *x;
2312 	int i;
2313 
2314 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
2315 	for (i = 0; i <= net->xfrm.state_hmask; i++) {
2316 		hlist_for_each_entry(x, xfrm_state_deref_prot(net->xfrm.state_bydst, net) + i, bydst)
2317 			xfrm_dev_state_update_stats(x);
2318 	}
2319 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
2320 }
2321 
2322 struct xfrm_state *
2323 xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
2324 		  u8 proto, unsigned short family)
2325 {
2326 	struct xfrm_hash_state_ptrs state_ptrs;
2327 	struct xfrm_state *x;
2328 
2329 	rcu_read_lock();
2330 	xfrm_hash_ptrs_get(net, &state_ptrs);
2331 
2332 	x = __xfrm_state_lookup(&state_ptrs, mark, daddr, spi, proto, family);
2333 	rcu_read_unlock();
2334 	return x;
2335 }
2336 EXPORT_SYMBOL(xfrm_state_lookup);
2337 
2338 struct xfrm_state *
2339 xfrm_state_lookup_byaddr(struct net *net, u32 mark,
2340 			 const xfrm_address_t *daddr, const xfrm_address_t *saddr,
2341 			 u8 proto, unsigned short family)
2342 {
2343 	struct xfrm_hash_state_ptrs state_ptrs;
2344 	struct xfrm_state *x;
2345 
2346 	rcu_read_lock();
2347 
2348 	xfrm_hash_ptrs_get(net, &state_ptrs);
2349 
2350 	x = __xfrm_state_lookup_byaddr(&state_ptrs, mark, daddr, saddr, proto, family);
2351 	rcu_read_unlock();
2352 	return x;
2353 }
2354 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
2355 
2356 struct xfrm_state *
2357 xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid,
2358 	      u32 if_id, u32 pcpu_num, u8 proto, const xfrm_address_t *daddr,
2359 	      const xfrm_address_t *saddr, int create, unsigned short family)
2360 {
2361 	struct xfrm_state *x;
2362 
2363 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
2364 	x = __find_acq_core(net, mark, family, mode, reqid, if_id, pcpu_num,
2365 			    proto, daddr, saddr, create);
2366 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
2367 
2368 	return x;
2369 }
2370 EXPORT_SYMBOL(xfrm_find_acq);
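/*
 * With create == 0 this is a pure lookup for an existing larval entry;
 * with create == 1 a new XFRM_STATE_ACQ state is minted whose hard
 * add-expiry is net->xfrm.sysctl_acq_expires seconds, bounding how long
 * the kernel waits for a key manager to answer the ACQUIRE before the
 * larval state times out.
 */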
2371 
2372 #ifdef CONFIG_XFRM_SUB_POLICY
2373 #if IS_ENABLED(CONFIG_IPV6)
2374 /* distribution counting sort function for xfrm_state and xfrm_tmpl */
2375 static void
2376 __xfrm6_sort(void **dst, void **src, int n,
2377 	     int (*cmp)(const void *p), int maxclass)
2378 {
2379 	int count[XFRM_MAX_DEPTH] = { };
2380 	int class[XFRM_MAX_DEPTH];
2381 	int i;
2382 
2383 	for (i = 0; i < n; i++) {
2384 		int c = cmp(src[i]);
2385 
2386 		class[i] = c;
2387 		count[c]++;
2388 	}
2389 
2390 	for (i = 2; i < maxclass; i++)
2391 		count[i] += count[i - 1];
2392 
2393 	for (i = 0; i < n; i++) {
2394 		dst[count[class[i] - 1]++] = src[i];
2395 		src[i] = NULL;
2396 	}
2397 }
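/*
 * Worked example for __xfrm6_sort() (illustrative input): for classes
 * {1, 3, 1, 2} and maxclass = 4 the raw counts are count[1] = 2,
 * count[2] = 1 and count[3] = 1.  The prefix pass starts at i = 2
 * because class-1 items begin at offset count[0] == 0; it leaves
 * count[1] = 2 and count[2] = 3, so class-2 items start at dst[2] and
 * class-3 items at dst[3], giving the stable result {1, 1, 2, 3}.
 */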
2398 
2399 /* Rule for xfrm_state:
2400  *
2401  * rule 1: select IPsec transport except AH
2402  * rule 2: select MIPv6 RO or inbound trigger
2403  * rule 3: select IPsec transport AH
2404  * rule 4: select IPsec tunnel
2405  * rule 5: others
2406  */
2407 static int __xfrm6_state_sort_cmp(const void *p)
2408 {
2409 	const struct xfrm_state *v = p;
2410 
2411 	switch (v->props.mode) {
2412 	case XFRM_MODE_TRANSPORT:
2413 		if (v->id.proto != IPPROTO_AH)
2414 			return 1;
2415 		else
2416 			return 3;
2417 #if IS_ENABLED(CONFIG_IPV6_MIP6)
2418 	case XFRM_MODE_ROUTEOPTIMIZATION:
2419 	case XFRM_MODE_IN_TRIGGER:
2420 		return 2;
2421 #endif
2422 	case XFRM_MODE_TUNNEL:
2423 	case XFRM_MODE_BEET:
2424 	case XFRM_MODE_IPTFS:
2425 		return 4;
2426 	}
2427 	return 5;
2428 }
2429 
2430 /* Rule for xfrm_tmpl:
2431  *
2432  * rule 1: select IPsec transport
2433  * rule 2: select MIPv6 RO or inbound trigger
2434  * rule 3: select IPsec tunnel
2435  * rule 4: others
2436  */
2437 static int __xfrm6_tmpl_sort_cmp(const void *p)
2438 {
2439 	const struct xfrm_tmpl *v = p;
2440 
2441 	switch (v->mode) {
2442 	case XFRM_MODE_TRANSPORT:
2443 		return 1;
2444 #if IS_ENABLED(CONFIG_IPV6_MIP6)
2445 	case XFRM_MODE_ROUTEOPTIMIZATION:
2446 	case XFRM_MODE_IN_TRIGGER:
2447 		return 2;
2448 #endif
2449 	case XFRM_MODE_TUNNEL:
2450 	case XFRM_MODE_BEET:
2451 	case XFRM_MODE_IPTFS:
2452 		return 3;
2453 	}
2454 	return 4;
2455 }
2456 #else
2457 static inline int __xfrm6_state_sort_cmp(const void *p) { return 5; }
2458 static inline int __xfrm6_tmpl_sort_cmp(const void *p) { return 4; }
2459 
2460 static inline void
2461 __xfrm6_sort(void **dst, void **src, int n,
2462 	     int (*cmp)(const void *p), int maxclass)
2463 {
2464 	int i;
2465 
2466 	for (i = 0; i < n; i++)
2467 		dst[i] = src[i];
2468 }
2469 #endif /* CONFIG_IPV6 */
2470 
2471 void
2472 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
2473 	       unsigned short family)
2474 {
2475 	int i;
2476 
2477 	if (family == AF_INET6)
2478 		__xfrm6_sort((void **)dst, (void **)src, n,
2479 			     __xfrm6_tmpl_sort_cmp, 5);
2480 	else
2481 		for (i = 0; i < n; i++)
2482 			dst[i] = src[i];
2483 }
2484 
2485 void
2486 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
2487 		unsigned short family)
2488 {
2489 	int i;
2490 
2491 	if (family == AF_INET6)
2492 		__xfrm6_sort((void **)dst, (void **)src, n,
2493 			     __xfrm6_state_sort_cmp, 6);
2494 	else
2495 		for (i = 0; i < n; i++)
2496 			dst[i] = src[i];
2497 }
2498 #endif
2499 
2500 /* Silly enough, but I'm too lazy to build a resolution list */
2501 
2502 static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num)
2503 {
2504 	unsigned int h = xfrm_seq_hash(net, seq);
2505 	struct xfrm_state *x;
2506 
2507 	hlist_for_each_entry(x, xfrm_state_deref_prot(net->xfrm.state_byseq, net) + h, byseq) {
2508 		if (x->km.seq == seq &&
2509 		    (mark & x->mark.m) == x->mark.v &&
2510 		    x->pcpu_num == pcpu_num &&
2511 		    x->km.state == XFRM_STATE_ACQ) {
2512 			xfrm_state_hold(x);
2513 			return x;
2514 		}
2515 	}
2516 
2517 	return NULL;
2518 }
2519 
2520 struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num)
2521 {
2522 	struct xfrm_state *x;
2523 
2524 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
2525 	x = __xfrm_find_acq_byseq(net, mark, seq, pcpu_num);
2526 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
2527 	return x;
2528 }
2529 EXPORT_SYMBOL(xfrm_find_acq_byseq);
2530 
2531 u32 xfrm_get_acqseq(void)
2532 {
2533 	u32 res;
2534 	static atomic_t acqseq;
2535 
2536 	do {
2537 		res = atomic_inc_return(&acqseq);
2538 	} while (!res);
2539 
2540 	return res;
2541 }
2542 EXPORT_SYMBOL(xfrm_get_acqseq);
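/*
 * The retry loop only matters on 32-bit wrap-around: it guarantees that
 * 0 is never handed out, since callers treat a zero km.seq as "no
 * ACQUIRE pending" before attempting the byseq lookup above.
 */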
2543 
2544 int verify_spi_info(u8 proto, u32 min, u32 max, struct netlink_ext_ack *extack)
2545 {
2546 	switch (proto) {
2547 	case IPPROTO_AH:
2548 	case IPPROTO_ESP:
2549 		break;
2550 
2551 	case IPPROTO_COMP:
2552 		/* IPCOMP SPIs (CPIs) are only 16 bits wide. */
2553 		if (max >= 0x10000) {
2554 			NL_SET_ERR_MSG(extack, "IPCOMP SPI must be <= 65535");
2555 			return -EINVAL;
2556 		}
2557 		break;
2558 
2559 	default:
2560 		NL_SET_ERR_MSG(extack, "Invalid protocol, must be one of AH, ESP, IPCOMP");
2561 		return -EINVAL;
2562 	}
2563 
2564 	if (min > max) {
2565 		NL_SET_ERR_MSG(extack, "Invalid SPI range: min > max");
2566 		return -EINVAL;
2567 	}
2568 
2569 	return 0;
2570 }
2571 EXPORT_SYMBOL(verify_spi_info);
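/*
 * Example (illustrative values): verify_spi_info(IPPROTO_COMP, 0x100,
 * 0x20000, extack) fails with -EINVAL because IPCOMP CPIs travel in a
 * 16-bit field, while the same range is acceptable for AH and ESP,
 * whose SPIs are a full 32 bits.
 */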
2572 
2573 int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high,
2574 		   struct netlink_ext_ack *extack)
2575 {
2576 	struct net *net = xs_net(x);
2577 	unsigned int h;
2578 	struct xfrm_state *x0;
2579 	int err = -ENOENT;
2580 	u32 range = high - low + 1;
2581 	__be32 newspi = 0;
2582 
2583 	spin_lock_bh(&x->lock);
2584 	if (x->km.state == XFRM_STATE_DEAD) {
2585 		NL_SET_ERR_MSG(extack, "Target ACQUIRE is in DEAD state");
2586 		goto unlock;
2587 	}
2588 
2589 	err = 0;
2590 	if (x->id.spi)
2591 		goto unlock;
2592 
2593 	err = -ENOENT;
2594 
2595 	for (h = 0; h < range; h++) {
2596 		u32 spi = (low == high) ? low : get_random_u32_inclusive(low, high);
2597 		if (spi == 0)
2598 			goto next;
2599 		newspi = htonl(spi);
2600 
2601 		spin_lock_bh(&net->xfrm.xfrm_state_lock);
2602 		x0 = xfrm_state_lookup_spi_proto(net, newspi, x->id.proto);
2603 		if (!x0) {
2604 			x->id.spi = newspi;
2605 			h = xfrm_spi_hash(net, &x->id.daddr, newspi, x->id.proto, x->props.family);
2606 			XFRM_STATE_INSERT(byspi, &x->byspi,
2607 					  xfrm_state_deref_prot(net->xfrm.state_byspi, net) + h,
2608 					  x->xso.type);
2609 			spin_unlock_bh(&net->xfrm.xfrm_state_lock);
2610 			err = 0;
2611 			goto unlock;
2612 		}
2613 		spin_unlock_bh(&net->xfrm.xfrm_state_lock);
2614 
2615 next:
2616 		if (signal_pending(current)) {
2617 			err = -ERESTARTSYS;
2618 			goto unlock;
2619 		}
2620 
2621 		if (low == high)
2622 			break;
2623 	}
2624 
2625 	if (err)
2626 		NL_SET_ERR_MSG(extack, "No SPI available in the requested range");
2627 
2628 unlock:
2629 	spin_unlock_bh(&x->lock);
2630 
2631 	return err;
2632 }
2633 EXPORT_SYMBOL(xfrm_alloc_spi);
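/*
 * Instead of scanning [low, high] linearly, each of the
 * range = high - low + 1 attempts draws an independent uniform candidate
 * from get_random_u32_inclusive(), so a sparsely used SPI space
 * converges almost immediately while an exhausted range still terminates
 * with -ENOENT after `range` probes (or earlier on a pending signal).
 */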
2634 
2635 static bool __xfrm_state_filter_match(struct xfrm_state *x,
2636 				      struct xfrm_address_filter *filter)
2637 {
2638 	if (filter) {
2639 		if ((filter->family == AF_INET ||
2640 		     filter->family == AF_INET6) &&
2641 		    x->props.family != filter->family)
2642 			return false;
2643 
2644 		return addr_match(&x->props.saddr, &filter->saddr,
2645 				  filter->splen) &&
2646 		       addr_match(&x->id.daddr, &filter->daddr,
2647 				  filter->dplen);
2648 	}
2649 	return true;
2650 }
2651 
2652 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
2653 		    int (*func)(struct xfrm_state *, int, void*),
2654 		    void *data)
2655 {
2656 	struct xfrm_state *state;
2657 	struct xfrm_state_walk *x;
2658 	int err = 0;
2659 
2660 	if (walk->seq != 0 && list_empty(&walk->all))
2661 		return 0;
2662 
2663 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
2664 	if (list_empty(&walk->all))
2665 		x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all);
2666 	else
2667 		x = list_first_entry(&walk->all, struct xfrm_state_walk, all);
2668 	list_for_each_entry_from(x, &net->xfrm.state_all, all) {
2669 		if (x->state == XFRM_STATE_DEAD)
2670 			continue;
2671 		state = container_of(x, struct xfrm_state, km);
2672 		if (!xfrm_id_proto_match(state->id.proto, walk->proto))
2673 			continue;
2674 		if (!__xfrm_state_filter_match(state, walk->filter))
2675 			continue;
2676 		err = func(state, walk->seq, data);
2677 		if (err) {
2678 			list_move_tail(&walk->all, &x->all);
2679 			goto out;
2680 		}
2681 		walk->seq++;
2682 	}
2683 	if (walk->seq == 0) {
2684 		err = -ENOENT;
2685 		goto out;
2686 	}
2687 	list_del_init(&walk->all);
2688 out:
2689 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
2690 	return err;
2691 }
2692 EXPORT_SYMBOL(xfrm_state_walk);
2693 
2694 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
2695 			  struct xfrm_address_filter *filter)
2696 {
2697 	INIT_LIST_HEAD(&walk->all);
2698 	walk->proto = proto;
2699 	walk->state = XFRM_STATE_DEAD;
2700 	walk->seq = 0;
2701 	walk->filter = filter;
2702 }
2703 EXPORT_SYMBOL(xfrm_state_walk_init);
2704 
2705 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net)
2706 {
2707 	kfree(walk->filter);
2708 
2709 	if (list_empty(&walk->all))
2710 		return;
2711 
2712 	spin_lock_bh(&net->xfrm.xfrm_state_lock);
2713 	list_del(&walk->all);
2714 	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
2715 }
2716 EXPORT_SYMBOL(xfrm_state_walk_done);
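/*
 * Usage sketch for the walker API (hypothetical caller, not taken from
 * in-tree code):
 *
 *	static int dump_one(struct xfrm_state *x, int count, void *ptr)
 *	{
 *		pr_info("SA %d: spi 0x%x\n", count, ntohl(x->id.spi));
 *		return 0;	(a non-zero return pauses the walk here)
 *	}
 *
 *	struct xfrm_state_walk walk;
 *
 *	xfrm_state_walk_init(&walk, IPSEC_PROTO_ANY, NULL);
 *	xfrm_state_walk(net, &walk, dump_one, NULL);
 *	xfrm_state_walk_done(&walk, net);
 *
 * A non-zero callback return leaves the walk position parked in
 * state_all so a later xfrm_state_walk() call resumes where it stopped;
 * xfrm_state_walk_done() must always run to unlink the walker.
 */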
2717 
2718 static void xfrm_replay_timer_handler(struct timer_list *t)
2719 {
2720 	struct xfrm_state *x = timer_container_of(x, t, rtimer);
2721 
2722 	spin_lock(&x->lock);
2723 
2724 	if (x->km.state == XFRM_STATE_VALID) {
2725 		if (xfrm_aevent_is_on(xs_net(x)))
2726 			xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
2727 		else
2728 			x->xflags |= XFRM_TIME_DEFER;
2729 	}
2730 
2731 	spin_unlock(&x->lock);
2732 }
2733 
2734 static LIST_HEAD(xfrm_km_list);
2735 
2736 void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
2737 {
2738 	struct xfrm_mgr *km;
2739 
2740 	rcu_read_lock();
2741 	list_for_each_entry_rcu(km, &xfrm_km_list, list)
2742 		if (km->notify_policy)
2743 			km->notify_policy(xp, dir, c);
2744 	rcu_read_unlock();
2745 }
2746 
2747 void km_state_notify(struct xfrm_state *x, const struct km_event *c)
2748 {
2749 	struct xfrm_mgr *km;

2750 	rcu_read_lock();
2751 	list_for_each_entry_rcu(km, &xfrm_km_list, list)
2752 		if (km->notify)
2753 			km->notify(x, c);
2754 	rcu_read_unlock();
2755 }
2756 
2757 EXPORT_SYMBOL(km_policy_notify);
2758 EXPORT_SYMBOL(km_state_notify);
2759 
2760 void km_state_expired(struct xfrm_state *x, int hard, u32 portid)
2761 {
2762 	struct km_event c;
2763 
2764 	c.data.hard = hard;
2765 	c.portid = portid;
2766 	c.event = XFRM_MSG_EXPIRE;
2767 	km_state_notify(x, &c);
2768 }
2770 EXPORT_SYMBOL(km_state_expired);
2771 /*
2772  * We send to all registered managers regardless of failure;
2773  * we are happy with one success.
2774  */
2775 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
2776 {
2777 	int err = -EINVAL, acqret;
2778 	struct xfrm_mgr *km;
2779 
2780 	rcu_read_lock();
2781 	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2782 		acqret = km->acquire(x, t, pol);
2783 		if (!acqret)
2784 			err = acqret;
2785 	}
2786 	rcu_read_unlock();
2787 	return err;
2788 }
2789 EXPORT_SYMBOL(km_query);
2790 
2791 static int __km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
2792 {
2793 	int err = -EINVAL;
2794 	struct xfrm_mgr *km;
2795 
2796 	rcu_read_lock();
2797 	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2798 		if (km->new_mapping)
2799 			err = km->new_mapping(x, ipaddr, sport);
2800 		if (!err)
2801 			break;
2802 	}
2803 	rcu_read_unlock();
2804 	return err;
2805 }
2806 
2807 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
2808 {
2809 	int ret = 0;
2810 
2811 	if (x->mapping_maxage) {
2812 		if ((jiffies / HZ - x->new_mapping) > x->mapping_maxage ||
2813 		    x->new_mapping_sport != sport) {
2814 			x->new_mapping_sport = sport;
2815 			x->new_mapping = jiffies / HZ;
2816 			ret = __km_new_mapping(x, ipaddr, sport);
2817 		}
2818 	} else {
2819 		ret = __km_new_mapping(x, ipaddr, sport);
2820 	}
2821 
2822 	return ret;
2823 }
2824 EXPORT_SYMBOL(km_new_mapping);
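/*
 * x->mapping_maxage rate-limits NAT mapping notifications: a change is
 * only forwarded to the key managers when the source port actually
 * differs or at least mapping_maxage seconds have elapsed since the
 * last report; with mapping_maxage == 0 every change is forwarded.
 */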
2825 
2826 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid)
2827 {
2828 	struct km_event c;
2829 
2830 	c.data.hard = hard;
2831 	c.portid = portid;
2832 	c.event = XFRM_MSG_POLEXPIRE;
2833 	km_policy_notify(pol, dir, &c);
2834 }
2835 EXPORT_SYMBOL(km_policy_expired);
2836 
2837 #ifdef CONFIG_XFRM_MIGRATE
2838 int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2839 	       const struct xfrm_migrate *m, int num_migrate,
2840 	       const struct xfrm_kmaddress *k,
2841 	       const struct xfrm_encap_tmpl *encap)
2842 {
2843 	int err = -EINVAL;
2844 	int ret;
2845 	struct xfrm_mgr *km;
2846 
2847 	rcu_read_lock();
2848 	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2849 		if (km->migrate) {
2850 			ret = km->migrate(sel, dir, type, m, num_migrate, k,
2851 					  encap);
2852 			if (!ret)
2853 				err = ret;
2854 		}
2855 	}
2856 	rcu_read_unlock();
2857 	return err;
2858 }
2859 EXPORT_SYMBOL(km_migrate);
2860 #endif
2861 
2862 int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
2863 {
2864 	int err = -EINVAL;
2865 	int ret;
2866 	struct xfrm_mgr *km;
2867 
2868 	rcu_read_lock();
2869 	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2870 		if (km->report) {
2871 			ret = km->report(net, proto, sel, addr);
2872 			if (!ret)
2873 				err = ret;
2874 		}
2875 	}
2876 	rcu_read_unlock();
2877 	return err;
2878 }
2879 EXPORT_SYMBOL(km_report);
2880 
2881 static bool km_is_alive(const struct km_event *c)
2882 {
2883 	struct xfrm_mgr *km;
2884 	bool is_alive = false;
2885 
2886 	rcu_read_lock();
2887 	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2888 		if (km->is_alive && km->is_alive(c)) {
2889 			is_alive = true;
2890 			break;
2891 		}
2892 	}
2893 	rcu_read_unlock();
2894 
2895 	return is_alive;
2896 }
2897 
2898 #if IS_ENABLED(CONFIG_XFRM_USER_COMPAT)
2899 static DEFINE_SPINLOCK(xfrm_translator_lock);
2900 static struct xfrm_translator __rcu *xfrm_translator;
2901 
2902 struct xfrm_translator *xfrm_get_translator(void)
2903 {
2904 	struct xfrm_translator *xtr;
2905 
2906 	rcu_read_lock();
2907 	xtr = rcu_dereference(xfrm_translator);
2908 	if (unlikely(!xtr))
2909 		goto out;
2910 	if (!try_module_get(xtr->owner))
2911 		xtr = NULL;
2912 out:
2913 	rcu_read_unlock();
2914 	return xtr;
2915 }
2916 EXPORT_SYMBOL_GPL(xfrm_get_translator);
2917 
2918 void xfrm_put_translator(struct xfrm_translator *xtr)
2919 {
2920 	module_put(xtr->owner);
2921 }
2922 EXPORT_SYMBOL_GPL(xfrm_put_translator);
2923 
2924 int xfrm_register_translator(struct xfrm_translator *xtr)
2925 {
2926 	int err = 0;
2927 
2928 	spin_lock_bh(&xfrm_translator_lock);
2929 	if (unlikely(xfrm_translator != NULL))
2930 		err = -EEXIST;
2931 	else
2932 		rcu_assign_pointer(xfrm_translator, xtr);
2933 	spin_unlock_bh(&xfrm_translator_lock);
2934 
2935 	return err;
2936 }
2937 EXPORT_SYMBOL_GPL(xfrm_register_translator);
2938 
2939 int xfrm_unregister_translator(struct xfrm_translator *xtr)
2940 {
2941 	int err = 0;
2942 
2943 	spin_lock_bh(&xfrm_translator_lock);
2944 	if (likely(xfrm_translator != NULL)) {
2945 		if (rcu_access_pointer(xfrm_translator) != xtr)
2946 			err = -EINVAL;
2947 		else
2948 			RCU_INIT_POINTER(xfrm_translator, NULL);
2949 	}
2950 	spin_unlock_bh(&xfrm_translator_lock);
2951 	synchronize_rcu();
2952 
2953 	return err;
2954 }
2955 EXPORT_SYMBOL_GPL(xfrm_unregister_translator);
2956 #endif
2957 
2958 int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval, int optlen)
2959 {
2960 	int err;
2961 	u8 *data;
2962 	struct xfrm_mgr *km;
2963 	struct xfrm_policy *pol = NULL;
2964 
2965 	if (sockptr_is_null(optval) && !optlen) {
2966 		xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
2967 		xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
2968 		__sk_dst_reset(sk);
2969 		return 0;
2970 	}
2971 
2972 	if (optlen <= 0 || optlen > PAGE_SIZE)
2973 		return -EMSGSIZE;
2974 
2975 	data = memdup_sockptr(optval, optlen);
2976 	if (IS_ERR(data))
2977 		return PTR_ERR(data);
2978 
2979 	if (in_compat_syscall()) {
2980 		struct xfrm_translator *xtr = xfrm_get_translator();
2981 
2982 		if (!xtr) {
2983 			kfree(data);
2984 			return -EOPNOTSUPP;
2985 		}
2986 
2987 		err = xtr->xlate_user_policy_sockptr(&data, optlen);
2988 		xfrm_put_translator(xtr);
2989 		if (err) {
2990 			kfree(data);
2991 			return err;
2992 		}
2993 	}
2994 
2995 	err = -EINVAL;
2996 	rcu_read_lock();
2997 	list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2998 		pol = km->compile_policy(sk, optname, data,
2999 					 optlen, &err);
3000 		if (err >= 0)
3001 			break;
3002 	}
3003 	rcu_read_unlock();
3004 
3005 	if (err >= 0) {
3006 		xfrm_sk_policy_insert(sk, err, pol);
3007 		xfrm_pol_put(pol);
3008 		__sk_dst_reset(sk);
3009 		err = 0;
3010 	}
3011 
3012 	kfree(data);
3013 	return err;
3014 }
3015 EXPORT_SYMBOL(xfrm_user_policy);
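/*
 * A NULL optval of zero length clears both per-socket policies.
 * Otherwise the buffer is duplicated, rewritten for 32-bit compat
 * callers through the registered translator, and offered to each key
 * manager's compile_policy() until one accepts it -- the non-negative
 * err returned doubles as the policy direction passed to
 * xfrm_sk_policy_insert().
 */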
3016 
3017 static DEFINE_SPINLOCK(xfrm_km_lock);
3018 
3019 void xfrm_register_km(struct xfrm_mgr *km)
3020 {
3021 	spin_lock_bh(&xfrm_km_lock);
3022 	list_add_tail_rcu(&km->list, &xfrm_km_list);
3023 	spin_unlock_bh(&xfrm_km_lock);
3024 }
3025 EXPORT_SYMBOL(xfrm_register_km);
3026 
3027 void xfrm_unregister_km(struct xfrm_mgr *km)
3028 {
3029 	spin_lock_bh(&xfrm_km_lock);
3030 	list_del_rcu(&km->list);
3031 	spin_unlock_bh(&xfrm_km_lock);
3032 	synchronize_rcu();
3033 }
3034 EXPORT_SYMBOL(xfrm_unregister_km);
3035 
3036 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
3037 {
3038 	int err = 0;
3039 
3040 	if (WARN_ON(afinfo->family >= NPROTO))
3041 		return -EAFNOSUPPORT;
3042 
3043 	spin_lock_bh(&xfrm_state_afinfo_lock);
3044 	if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
3045 		err = -EEXIST;
3046 	else
3047 		rcu_assign_pointer(xfrm_state_afinfo[afinfo->family], afinfo);
3048 	spin_unlock_bh(&xfrm_state_afinfo_lock);
3049 	return err;
3050 }
3051 EXPORT_SYMBOL(xfrm_state_register_afinfo);
3052 
3053 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
3054 {
3055 	int err = 0, family = afinfo->family;
3056 
3057 	if (WARN_ON(family >= NPROTO))
3058 		return -EAFNOSUPPORT;
3059 
3060 	spin_lock_bh(&xfrm_state_afinfo_lock);
3061 	if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
3062 		if (rcu_access_pointer(xfrm_state_afinfo[family]) != afinfo)
3063 			err = -EINVAL;
3064 		else
3065 			RCU_INIT_POINTER(xfrm_state_afinfo[afinfo->family], NULL);
3066 	}
3067 	spin_unlock_bh(&xfrm_state_afinfo_lock);
3068 	synchronize_rcu();
3069 	return err;
3070 }
3071 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
3072 
3073 struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family)
3074 {
3075 	if (unlikely(family >= NPROTO))
3076 		return NULL;
3077 
3078 	return rcu_dereference(xfrm_state_afinfo[family]);
3079 }
3080 EXPORT_SYMBOL_GPL(xfrm_state_afinfo_get_rcu);
3081 
3082 struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
3083 {
3084 	struct xfrm_state_afinfo *afinfo;

3085 	if (unlikely(family >= NPROTO))
3086 		return NULL;
3087 	rcu_read_lock();
3088 	afinfo = rcu_dereference(xfrm_state_afinfo[family]);
3089 	if (unlikely(!afinfo))
3090 		rcu_read_unlock();
3091 	return afinfo;
3092 }
3093 
3094 void xfrm_flush_gc(void)
3095 {
3096 	flush_work(&xfrm_state_gc_work);
3097 }
3098 EXPORT_SYMBOL(xfrm_flush_gc);
3099 
3100 static void xfrm_state_delete_tunnel(struct xfrm_state *x)
3101 {
3102 	if (x->tunnel) {
3103 		struct xfrm_state *t = x->tunnel;
3104 
3105 		if (atomic_dec_return(&t->tunnel_users) == 1)
3106 			xfrm_state_delete(t);
3107 		xfrm_state_put(t);
3108 		x->tunnel = NULL;
3109 	}
3110 }
3111 
3112 u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
3113 {
3114 	const struct xfrm_type *type = READ_ONCE(x->type);
3115 	struct crypto_aead *aead;
3116 	u32 blksize, net_adj = 0;
3117 
3118 	if (x->km.state != XFRM_STATE_VALID ||
3119 	    !type || type->proto != IPPROTO_ESP)
3120 		return mtu - x->props.header_len;
3121 
3122 	aead = x->data;
3123 	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
3124 
3125 	switch (x->props.mode) {
3126 	case XFRM_MODE_TRANSPORT:
3127 	case XFRM_MODE_BEET:
3128 		if (x->props.family == AF_INET)
3129 			net_adj = sizeof(struct iphdr);
3130 		else if (x->props.family == AF_INET6)
3131 			net_adj = sizeof(struct ipv6hdr);
3132 		break;
3133 	case XFRM_MODE_TUNNEL:
3134 		break;
3135 	default:
3136 		if (x->mode_cbs && x->mode_cbs->get_inner_mtu)
3137 			return x->mode_cbs->get_inner_mtu(x, mtu);
3138 
3139 		WARN_ON_ONCE(1);
3140 		break;
3141 	}
3142 
3143 	return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
3144 		 net_adj) & ~(blksize - 1)) + net_adj - 2;
3145 }
3146 EXPORT_SYMBOL_GPL(xfrm_state_mtu);
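/*
 * Worked example (illustrative figures): ESP in tunnel mode over IPv4
 * with header_len = 36 (outer IP header plus SPI, sequence number and
 * an 8-byte IV), a 16-byte cipher block (blksize = 16) and a 16-byte
 * ICV: an mtu of 1500 yields ((1500 - 36 - 16 - 0) & ~15) + 0 - 2 =
 * 1438 bytes of payload, the trailing "- 2" accounting for the ESP
 * trailer's pad-length and next-header octets.
 */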
3147 
3148 int __xfrm_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
3149 {
3150 	const struct xfrm_mode *inner_mode;
3151 	const struct xfrm_mode *outer_mode;
3152 	int family = x->props.family;
3153 	int err;
3154 
3155 	if (family == AF_INET &&
3156 	    (!x->dir || x->dir == XFRM_SA_DIR_OUT) &&
3157 	    READ_ONCE(xs_net(x)->ipv4.sysctl_ip_no_pmtu_disc))
3158 		x->props.flags |= XFRM_STATE_NOPMTUDISC;
3159 
3160 	err = -EPROTONOSUPPORT;
3161 
3162 	if (x->sel.family != AF_UNSPEC) {
3163 		inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
3164 		if (inner_mode == NULL) {
3165 			NL_SET_ERR_MSG(extack, "Requested mode not found");
3166 			goto error;
3167 		}
3168 
3169 		if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
3170 		    family != x->sel.family) {
3171 			NL_SET_ERR_MSG(extack, "Only tunnel modes can accommodate a change of family");
3172 			goto error;
3173 		}
3174 
3175 		x->inner_mode = *inner_mode;
3176 	} else {
3177 		const struct xfrm_mode *inner_mode_iaf;
3178 		int iafamily = AF_INET;
3179 
3180 		inner_mode = xfrm_get_mode(x->props.mode, x->props.family);
3181 		if (inner_mode == NULL) {
3182 			NL_SET_ERR_MSG(extack, "Requested mode not found");
3183 			goto error;
3184 		}
3185 
3186 		x->inner_mode = *inner_mode;
3187 
3188 		if (x->props.family == AF_INET)
3189 			iafamily = AF_INET6;
3190 
3191 		inner_mode_iaf = xfrm_get_mode(x->props.mode, iafamily);
3192 		if (inner_mode_iaf) {
3193 			if (inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)
3194 				x->inner_mode_iaf = *inner_mode_iaf;
3195 		}
3196 	}
3197 
3198 	x->type = xfrm_get_type(x->id.proto, family);
3199 	if (x->type == NULL) {
3200 		NL_SET_ERR_MSG(extack, "Requested type not found");
3201 		goto error;
3202 	}
3203 
3204 	err = x->type->init_state(x, extack);
3205 	if (err)
3206 		goto error;
3207 
3208 	outer_mode = xfrm_get_mode(x->props.mode, family);
3209 	if (!outer_mode) {
3210 		NL_SET_ERR_MSG(extack, "Requested mode not found");
3211 		err = -EPROTONOSUPPORT;
3212 		goto error;
3213 	}
3214 
3215 	x->outer_mode = *outer_mode;
3216 	if (x->nat_keepalive_interval) {
3217 		if (x->dir != XFRM_SA_DIR_OUT) {
3218 			NL_SET_ERR_MSG(extack, "NAT keepalive is only supported for outbound SAs");
3219 			err = -EINVAL;
3220 			goto error;
3221 		}
3222 
3223 		if (!x->encap || x->encap->encap_type != UDP_ENCAP_ESPINUDP) {
3224 			NL_SET_ERR_MSG(extack,
3225 				       "NAT keepalive is only supported for UDP encapsulation");
3226 			err = -EINVAL;
3227 			goto error;
3228 		}
3229 	}
3230 
3231 	x->mode_cbs = xfrm_get_mode_cbs(x->props.mode);
3232 	if (x->mode_cbs) {
3233 		if (x->mode_cbs->init_state)
3234 			err = x->mode_cbs->init_state(x);
3235 		module_put(x->mode_cbs->owner);
3236 	}
3237 error:
3238 	return err;
3239 }
3241 EXPORT_SYMBOL(__xfrm_init_state);
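/*
 * Three lookups are resolved above: the inner mode (from the selector
 * family, or from both address families when the selector is wildcard),
 * the protocol type, and the outer mode.  NAT keepalive is additionally
 * constrained to outbound, ESP-in-UDP SAs.
 */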
3242 
3243 int xfrm_init_state(struct xfrm_state *x)
3244 {
3245 	int err;
3246 
3247 	err = __xfrm_init_state(x, NULL);
3248 	if (err)
3249 		return err;
3250 
3251 	err = xfrm_init_replay(x, NULL);
3252 	if (err)
3253 		return err;
3254 
3255 	x->km.state = XFRM_STATE_VALID;
3256 	return 0;
3257 }
3259 EXPORT_SYMBOL(xfrm_init_state);
3260 
3261 int __net_init xfrm_state_init(struct net *net)
3262 {
3263 	struct hlist_head *ndst, *nsrc, *nspi, *nseq;
3264 	unsigned int sz;
3265 
3266 	if (net_eq(net, &init_net))
3267 		xfrm_state_cache = KMEM_CACHE(xfrm_state,
3268 					      SLAB_HWCACHE_ALIGN | SLAB_PANIC);
3269 
3270 	INIT_LIST_HEAD(&net->xfrm.state_all);
3271 
3272 	sz = sizeof(struct hlist_head) * 8;
3273 
3274 	ndst = xfrm_hash_alloc(sz);
3275 	if (!ndst)
3276 		goto out_bydst;
3277 	rcu_assign_pointer(net->xfrm.state_bydst, ndst);
3278 
3279 	nsrc = xfrm_hash_alloc(sz);
3280 	if (!nsrc)
3281 		goto out_bysrc;
3282 	rcu_assign_pointer(net->xfrm.state_bysrc, nsrc);
3283 
3284 	nspi = xfrm_hash_alloc(sz);
3285 	if (!nspi)
3286 		goto out_byspi;
3287 	rcu_assign_pointer(net->xfrm.state_byspi, nspi);
3288 
3289 	nseq = xfrm_hash_alloc(sz);
3290 	if (!nseq)
3291 		goto out_byseq;
3292 	rcu_assign_pointer(net->xfrm.state_byseq, nseq);
3293 
3294 	net->xfrm.state_cache_input = alloc_percpu(struct hlist_head);
3295 	if (!net->xfrm.state_cache_input)
3296 		goto out_state_cache_input;
3297 
3298 	net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
3299 
3300 	net->xfrm.state_num = 0;
3301 	INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
3302 	spin_lock_init(&net->xfrm.xfrm_state_lock);
3303 	seqcount_spinlock_init(&net->xfrm.xfrm_state_hash_generation,
3304 			       &net->xfrm.xfrm_state_lock);
3305 	return 0;
3306 
3307 out_state_cache_input:
3308 	xfrm_hash_free(nseq, sz);
3309 out_byseq:
3310 	xfrm_hash_free(nspi, sz);
3311 out_byspi:
3312 	xfrm_hash_free(nsrc, sz);
3313 out_bysrc:
3314 	xfrm_hash_free(ndst, sz);
3315 out_bydst:
3316 	return -ENOMEM;
3317 }
3318 
3319 #define xfrm_state_deref_netexit(table) \
3320 	rcu_dereference_protected((table), true /* netns is going away */)
3321 void xfrm_state_fini(struct net *net)
3322 {
3323 	unsigned int sz;
3324 	int i;
3325 
3326 	flush_work(&net->xfrm.state_hash_work);
3327 	xfrm_state_flush(net, 0, false);
3328 	flush_work(&xfrm_state_gc_work);
3329 
3330 	WARN_ON(!list_empty(&net->xfrm.state_all));
3331 
3332 	for (i = 0; i <= net->xfrm.state_hmask; i++) {
3333 		WARN_ON(!hlist_empty(xfrm_state_deref_netexit(net->xfrm.state_byseq) + i));
3334 		WARN_ON(!hlist_empty(xfrm_state_deref_netexit(net->xfrm.state_byspi) + i));
3335 		WARN_ON(!hlist_empty(xfrm_state_deref_netexit(net->xfrm.state_bysrc) + i));
3336 		WARN_ON(!hlist_empty(xfrm_state_deref_netexit(net->xfrm.state_bydst) + i));
3337 	}
3338 
3339 	sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head);
3340 	xfrm_hash_free(xfrm_state_deref_netexit(net->xfrm.state_byseq), sz);
3341 	xfrm_hash_free(xfrm_state_deref_netexit(net->xfrm.state_byspi), sz);
3342 	xfrm_hash_free(xfrm_state_deref_netexit(net->xfrm.state_bysrc), sz);
3343 	xfrm_hash_free(xfrm_state_deref_netexit(net->xfrm.state_bydst), sz);
3344 	free_percpu(net->xfrm.state_cache_input);
3345 }
3346 
3347 #ifdef CONFIG_AUDITSYSCALL
3348 static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
3349 				     struct audit_buffer *audit_buf)
3350 {
3351 	struct xfrm_sec_ctx *ctx = x->security;
3352 	u32 spi = ntohl(x->id.spi);
3353 
3354 	if (ctx)
3355 		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
3356 				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
3357 
3358 	switch (x->props.family) {
3359 	case AF_INET:
3360 		audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
3361 				 &x->props.saddr.a4, &x->id.daddr.a4);
3362 		break;
3363 	case AF_INET6:
3364 		audit_log_format(audit_buf, " src=%pI6 dst=%pI6",
3365 				 x->props.saddr.a6, x->id.daddr.a6);
3366 		break;
3367 	}
3368 
3369 	audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
3370 }
3371 
3372 static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
3373 				      struct audit_buffer *audit_buf)
3374 {
3375 	const struct iphdr *iph4;
3376 	const struct ipv6hdr *iph6;
3377 
3378 	switch (family) {
3379 	case AF_INET:
3380 		iph4 = ip_hdr(skb);
3381 		audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
3382 				 &iph4->saddr, &iph4->daddr);
3383 		break;
3384 	case AF_INET6:
3385 		iph6 = ipv6_hdr(skb);
3386 		audit_log_format(audit_buf,
3387 				 " src=%pI6 dst=%pI6 flowlbl=0x%x%02x%02x",
3388 				 &iph6->saddr, &iph6->daddr,
3389 				 iph6->flow_lbl[0] & 0x0f,
3390 				 iph6->flow_lbl[1],
3391 				 iph6->flow_lbl[2]);
3392 		break;
3393 	}
3394 }
3395 
3396 void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid)
3397 {
3398 	struct audit_buffer *audit_buf;
3399 
3400 	audit_buf = xfrm_audit_start("SAD-add");
3401 	if (audit_buf == NULL)
3402 		return;
3403 	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
3404 	xfrm_audit_helper_sainfo(x, audit_buf);
3405 	audit_log_format(audit_buf, " res=%u", result);
3406 	audit_log_end(audit_buf);
3407 }
3408 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
3409 
3410 void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid)
3411 {
3412 	struct audit_buffer *audit_buf;
3413 
3414 	audit_buf = xfrm_audit_start("SAD-delete");
3415 	if (audit_buf == NULL)
3416 		return;
3417 	xfrm_audit_helper_usrinfo(task_valid, audit_buf);
3418 	xfrm_audit_helper_sainfo(x, audit_buf);
3419 	audit_log_format(audit_buf, " res=%u", result);
3420 	audit_log_end(audit_buf);
3421 }
3422 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
3423 
3424 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
3425 				      struct sk_buff *skb)
3426 {
3427 	struct audit_buffer *audit_buf;
3428 	u32 spi;
3429 
3430 	audit_buf = xfrm_audit_start("SA-replay-overflow");
3431 	if (audit_buf == NULL)
3432 		return;
3433 	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
3434 	/* don't record the sequence number because it's inherent in this kind
3435 	 * of audit message */
3436 	spi = ntohl(x->id.spi);
3437 	audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
3438 	audit_log_end(audit_buf);
3439 }
3440 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
3441 
3442 void xfrm_audit_state_replay(struct xfrm_state *x,
3443 			     struct sk_buff *skb, __be32 net_seq)
3444 {
3445 	struct audit_buffer *audit_buf;
3446 	u32 spi;
3447 
3448 	audit_buf = xfrm_audit_start("SA-replayed-pkt");
3449 	if (audit_buf == NULL)
3450 		return;
3451 	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
3452 	spi = ntohl(x->id.spi);
3453 	audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
3454 			 spi, spi, ntohl(net_seq));
3455 	audit_log_end(audit_buf);
3456 }
3457 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay);
3458 
3459 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
3460 {
3461 	struct audit_buffer *audit_buf;
3462 
3463 	audit_buf = xfrm_audit_start("SA-notfound");
3464 	if (audit_buf == NULL)
3465 		return;
3466 	xfrm_audit_helper_pktinfo(skb, family, audit_buf);
3467 	audit_log_end(audit_buf);
3468 }
3469 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);
3470 
3471 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
3472 			       __be32 net_spi, __be32 net_seq)
3473 {
3474 	struct audit_buffer *audit_buf;
3475 	u32 spi;
3476 
3477 	audit_buf = xfrm_audit_start("SA-notfound");
3478 	if (audit_buf == NULL)
3479 		return;
3480 	xfrm_audit_helper_pktinfo(skb, family, audit_buf);
3481 	spi = ntohl(net_spi);
3482 	audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
3483 			 spi, spi, ntohl(net_seq));
3484 	audit_log_end(audit_buf);
3485 }
3486 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);
3487 
3488 void xfrm_audit_state_icvfail(struct xfrm_state *x,
3489 			      struct sk_buff *skb, u8 proto)
3490 {
3491 	struct audit_buffer *audit_buf;
3492 	__be32 net_spi;
3493 	__be32 net_seq;
3494 
3495 	audit_buf = xfrm_audit_start("SA-icv-failure");
3496 	if (audit_buf == NULL)
3497 		return;
3498 	xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
3499 	if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
3500 		u32 spi = ntohl(net_spi);
3501 		audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
3502 				 spi, spi, ntohl(net_seq));
3503 	}
3504 	audit_log_end(audit_buf);
3505 }
3506 EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
3507 #endif /* CONFIG_AUDITSYSCALL */
3508