1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * xfrm_state.c
4 *
5 * Changes:
6 * Mitsuru KANDA @USAGI
7 * Kazunori MIYAZAWA @USAGI
8 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
9 * IPv6 support
10 * YOSHIFUJI Hideaki @USAGI
11 * Split up af-specific functions
12 * Derek Atkins <derek@ihtfp.com>
13 * Add UDP Encapsulation
14 *
15 */
16
17 #include <linux/compat.h>
18 #include <linux/workqueue.h>
19 #include <net/xfrm.h>
20 #include <linux/pfkeyv2.h>
21 #include <linux/ipsec.h>
22 #include <linux/module.h>
23 #include <linux/cache.h>
24 #include <linux/audit.h>
25 #include <linux/uaccess.h>
26 #include <linux/ktime.h>
27 #include <linux/slab.h>
28 #include <linux/interrupt.h>
29 #include <linux/kernel.h>
30
31 #include <crypto/aead.h>
32
33 #include "xfrm_hash.h"
34
35 #define xfrm_state_deref_prot(table, net) \
36 rcu_dereference_protected((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
37 #define xfrm_state_deref_check(table, net) \
38 rcu_dereference_check((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
39
40 static void xfrm_state_gc_task(struct work_struct *work);
41
42 /* Each xfrm_state may be linked to two tables:
43
44 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
45 2. Hash table by (daddr,family,reqid) to find what SAs exist for given
46 destination/tunnel endpoint. (output)
47 */
48
49 static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
50 static struct kmem_cache *xfrm_state_cache __ro_after_init;
51
52 static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
53 static HLIST_HEAD(xfrm_state_gc_list);
54 static HLIST_HEAD(xfrm_state_dev_gc_list);
55
56 static inline bool xfrm_state_hold_rcu(struct xfrm_state __rcu *x)
57 {
58 return refcount_inc_not_zero(&x->refcnt);
59 }
60
61 static inline unsigned int xfrm_dst_hash(struct net *net,
62 const xfrm_address_t *daddr,
63 const xfrm_address_t *saddr,
64 u32 reqid,
65 unsigned short family)
66 {
67 lockdep_assert_held(&net->xfrm.xfrm_state_lock);
68
69 return __xfrm_dst_hash(daddr, saddr, reqid, family, net->xfrm.state_hmask);
70 }
71
72 static inline unsigned int xfrm_src_hash(struct net *net,
73 const xfrm_address_t *daddr,
74 const xfrm_address_t *saddr,
75 unsigned short family)
76 {
77 lockdep_assert_held(&net->xfrm.xfrm_state_lock);
78
79 return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask);
80 }
81
82 static inline unsigned int
83 xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
84 __be32 spi, u8 proto, unsigned short family)
85 {
86 lockdep_assert_held(&net->xfrm.xfrm_state_lock);
87
88 return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
89 }
90
91 static unsigned int xfrm_seq_hash(struct net *net, u32 seq)
92 {
93 lockdep_assert_held(&net->xfrm.xfrm_state_lock);
94
95 return __xfrm_seq_hash(seq, net->xfrm.state_hmask);
96 }
97
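/* Note on chain ordering (added summary of the macro below): the insert
 * helper keeps packet-offload (HW) states grouped at the head of each
 * hash chain.  HW states are added at the head; a software state is
 * inserted in front of the first software state found, or at the head
 * when the chain is empty or holds HW SAs only.  The offload-aware
 * lookups further down rely on this to stop iterating once they walk
 * past the HW prefix of a chain.
 */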
98 #define XFRM_STATE_INSERT(by, _n, _h, _type) \
99 { \
100 struct xfrm_state *_x = NULL; \
101 \
102 if (_type != XFRM_DEV_OFFLOAD_PACKET) { \
103 hlist_for_each_entry_rcu(_x, _h, by) { \
104 if (_x->xso.type == XFRM_DEV_OFFLOAD_PACKET) \
105 continue; \
106 break; \
107 } \
108 } \
109 \
110 if (!_x || _x->xso.type == XFRM_DEV_OFFLOAD_PACKET) \
111 /* SAD is empty or consists of HW SAs only */ \
112 hlist_add_head_rcu(_n, _h); \
113 else \
114 hlist_add_before_rcu(_n, &_x->by); \
115 }
116
117 static void xfrm_hash_transfer(struct hlist_head *list,
118 struct hlist_head *ndsttable,
119 struct hlist_head *nsrctable,
120 struct hlist_head *nspitable,
121 struct hlist_head *nseqtable,
122 unsigned int nhashmask)
123 {
124 struct hlist_node *tmp;
125 struct xfrm_state *x;
126
127 hlist_for_each_entry_safe(x, tmp, list, bydst) {
128 unsigned int h;
129
130 h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
131 x->props.reqid, x->props.family,
132 nhashmask);
133 XFRM_STATE_INSERT(bydst, &x->bydst, ndsttable + h, x->xso.type);
134
135 h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
136 x->props.family,
137 nhashmask);
138 XFRM_STATE_INSERT(bysrc, &x->bysrc, nsrctable + h, x->xso.type);
139
140 if (x->id.spi) {
141 h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
142 x->id.proto, x->props.family,
143 nhashmask);
144 XFRM_STATE_INSERT(byspi, &x->byspi, nspitable + h,
145 x->xso.type);
146 }
147
148 if (x->km.seq) {
149 h = __xfrm_seq_hash(x->km.seq, nhashmask);
150 XFRM_STATE_INSERT(byseq, &x->byseq, nseqtable + h,
151 x->xso.type);
152 }
153 }
154 }
155
156 static unsigned long xfrm_hash_new_size(unsigned int state_hmask)
157 {
158 return ((state_hmask + 1) << 1) * sizeof(struct hlist_head);
159 }
160
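/* Rehash all states into tables of twice the current size.  The new
 * tables are allocated up front; the transfer runs under xfrm_state_lock
 * inside a write seqcount so lockless readers see either the old or the
 * new generation, and the old tables are freed only after an RCU grace
 * period.
 */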
161 static void xfrm_hash_resize(struct work_struct *work)
162 {
163 struct net *net = container_of(work, struct net, xfrm.state_hash_work);
164 struct hlist_head *ndst, *nsrc, *nspi, *nseq, *odst, *osrc, *ospi, *oseq;
165 unsigned long nsize, osize;
166 unsigned int nhashmask, ohashmask;
167 int i;
168
169 nsize = xfrm_hash_new_size(net->xfrm.state_hmask);
170 ndst = xfrm_hash_alloc(nsize);
171 if (!ndst)
172 return;
173 nsrc = xfrm_hash_alloc(nsize);
174 if (!nsrc) {
175 xfrm_hash_free(ndst, nsize);
176 return;
177 }
178 nspi = xfrm_hash_alloc(nsize);
179 if (!nspi) {
180 xfrm_hash_free(ndst, nsize);
181 xfrm_hash_free(nsrc, nsize);
182 return;
183 }
184 nseq = xfrm_hash_alloc(nsize);
185 if (!nseq) {
186 xfrm_hash_free(ndst, nsize);
187 xfrm_hash_free(nsrc, nsize);
188 xfrm_hash_free(nspi, nsize);
189 return;
190 }
191
192 spin_lock_bh(&net->xfrm.xfrm_state_lock);
193 write_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
194
195 nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
196 odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
197 for (i = net->xfrm.state_hmask; i >= 0; i--)
198 xfrm_hash_transfer(odst + i, ndst, nsrc, nspi, nseq, nhashmask);
199
200 osrc = xfrm_state_deref_prot(net->xfrm.state_bysrc, net);
201 ospi = xfrm_state_deref_prot(net->xfrm.state_byspi, net);
202 oseq = xfrm_state_deref_prot(net->xfrm.state_byseq, net);
203 ohashmask = net->xfrm.state_hmask;
204
205 rcu_assign_pointer(net->xfrm.state_bydst, ndst);
206 rcu_assign_pointer(net->xfrm.state_bysrc, nsrc);
207 rcu_assign_pointer(net->xfrm.state_byspi, nspi);
208 rcu_assign_pointer(net->xfrm.state_byseq, nseq);
209 net->xfrm.state_hmask = nhashmask;
210
211 write_seqcount_end(&net->xfrm.xfrm_state_hash_generation);
212 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
213
214 osize = (ohashmask + 1) * sizeof(struct hlist_head);
215
216 synchronize_rcu();
217
218 xfrm_hash_free(odst, osize);
219 xfrm_hash_free(osrc, osize);
220 xfrm_hash_free(ospi, osize);
221 xfrm_hash_free(oseq, osize);
222 }
223
224 static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
225 static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO];
226
227 static DEFINE_SPINLOCK(xfrm_state_gc_lock);
228 static DEFINE_SPINLOCK(xfrm_state_dev_gc_lock);
229
230 int __xfrm_state_delete(struct xfrm_state *x);
231
232 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
233 static bool km_is_alive(const struct km_event *c);
234 void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
235
236 int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
237 {
238 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
239 int err = 0;
240
241 if (!afinfo)
242 return -EAFNOSUPPORT;
243
244 #define X(afi, T, name) do { \
245 WARN_ON((afi)->type_ ## name); \
246 (afi)->type_ ## name = (T); \
247 } while (0)
248
249 switch (type->proto) {
250 case IPPROTO_COMP:
251 X(afinfo, type, comp);
252 break;
253 case IPPROTO_AH:
254 X(afinfo, type, ah);
255 break;
256 case IPPROTO_ESP:
257 X(afinfo, type, esp);
258 break;
259 case IPPROTO_IPIP:
260 X(afinfo, type, ipip);
261 break;
262 case IPPROTO_DSTOPTS:
263 X(afinfo, type, dstopts);
264 break;
265 case IPPROTO_ROUTING:
266 X(afinfo, type, routing);
267 break;
268 case IPPROTO_IPV6:
269 X(afinfo, type, ipip6);
270 break;
271 default:
272 WARN_ON(1);
273 err = -EPROTONOSUPPORT;
274 break;
275 }
276 #undef X
277 rcu_read_unlock();
278 return err;
279 }
280 EXPORT_SYMBOL(xfrm_register_type);
281
282 void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
283 {
284 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
285
286 if (unlikely(afinfo == NULL))
287 return;
288
289 #define X(afi, T, name) do { \
290 WARN_ON((afi)->type_ ## name != (T)); \
291 (afi)->type_ ## name = NULL; \
292 } while (0)
293
294 switch (type->proto) {
295 case IPPROTO_COMP:
296 X(afinfo, type, comp);
297 break;
298 case IPPROTO_AH:
299 X(afinfo, type, ah);
300 break;
301 case IPPROTO_ESP:
302 X(afinfo, type, esp);
303 break;
304 case IPPROTO_IPIP:
305 X(afinfo, type, ipip);
306 break;
307 case IPPROTO_DSTOPTS:
308 X(afinfo, type, dstopts);
309 break;
310 case IPPROTO_ROUTING:
311 X(afinfo, type, routing);
312 break;
313 case IPPROTO_IPV6:
314 X(afinfo, type, ipip6);
315 break;
316 default:
317 WARN_ON(1);
318 break;
319 }
320 #undef X
321 rcu_read_unlock();
322 }
323 EXPORT_SYMBOL(xfrm_unregister_type);
324
325 static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
326 {
327 const struct xfrm_type *type = NULL;
328 struct xfrm_state_afinfo *afinfo;
329 int modload_attempted = 0;
330
331 retry:
332 afinfo = xfrm_state_get_afinfo(family);
333 if (unlikely(afinfo == NULL))
334 return NULL;
335
336 switch (proto) {
337 case IPPROTO_COMP:
338 type = afinfo->type_comp;
339 break;
340 case IPPROTO_AH:
341 type = afinfo->type_ah;
342 break;
343 case IPPROTO_ESP:
344 type = afinfo->type_esp;
345 break;
346 case IPPROTO_IPIP:
347 type = afinfo->type_ipip;
348 break;
349 case IPPROTO_DSTOPTS:
350 type = afinfo->type_dstopts;
351 break;
352 case IPPROTO_ROUTING:
353 type = afinfo->type_routing;
354 break;
355 case IPPROTO_IPV6:
356 type = afinfo->type_ipip6;
357 break;
358 default:
359 break;
360 }
361
362 if (unlikely(type && !try_module_get(type->owner)))
363 type = NULL;
364
365 rcu_read_unlock();
366
367 if (!type && !modload_attempted) {
368 request_module("xfrm-type-%d-%d", family, proto);
369 modload_attempted = 1;
370 goto retry;
371 }
372
373 return type;
374 }
375
376 static void xfrm_put_type(const struct xfrm_type *type)
377 {
378 module_put(type->owner);
379 }
380
381 int xfrm_register_type_offload(const struct xfrm_type_offload *type,
382 unsigned short family)
383 {
384 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
385 int err = 0;
386
387 if (unlikely(afinfo == NULL))
388 return -EAFNOSUPPORT;
389
390 switch (type->proto) {
391 case IPPROTO_ESP:
392 WARN_ON(afinfo->type_offload_esp);
393 afinfo->type_offload_esp = type;
394 break;
395 default:
396 WARN_ON(1);
397 err = -EPROTONOSUPPORT;
398 break;
399 }
400
401 rcu_read_unlock();
402 return err;
403 }
404 EXPORT_SYMBOL(xfrm_register_type_offload);
405
406 void xfrm_unregister_type_offload(const struct xfrm_type_offload *type,
407 unsigned short family)
408 {
409 struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
410
411 if (unlikely(afinfo == NULL))
412 return;
413
414 switch (type->proto) {
415 case IPPROTO_ESP:
416 WARN_ON(afinfo->type_offload_esp != type);
417 afinfo->type_offload_esp = NULL;
418 break;
419 default:
420 WARN_ON(1);
421 break;
422 }
423 rcu_read_unlock();
424 }
425 EXPORT_SYMBOL(xfrm_unregister_type_offload);
426
427 void xfrm_set_type_offload(struct xfrm_state *x)
428 {
429 const struct xfrm_type_offload *type = NULL;
430 struct xfrm_state_afinfo *afinfo;
431 bool try_load = true;
432
433 retry:
434 afinfo = xfrm_state_get_afinfo(x->props.family);
435 if (unlikely(afinfo == NULL))
436 goto out;
437
438 switch (x->id.proto) {
439 case IPPROTO_ESP:
440 type = afinfo->type_offload_esp;
441 break;
442 default:
443 break;
444 }
445
446 if ((type && !try_module_get(type->owner)))
447 type = NULL;
448
449 rcu_read_unlock();
450
451 if (!type && try_load) {
452 request_module("xfrm-offload-%d-%d", x->props.family,
453 x->id.proto);
454 try_load = false;
455 goto retry;
456 }
457
458 out:
459 x->type_offload = type;
460 }
461 EXPORT_SYMBOL(xfrm_set_type_offload);
462
463 static const struct xfrm_mode xfrm4_mode_map[XFRM_MODE_MAX] = {
464 [XFRM_MODE_BEET] = {
465 .encap = XFRM_MODE_BEET,
466 .flags = XFRM_MODE_FLAG_TUNNEL,
467 .family = AF_INET,
468 },
469 [XFRM_MODE_TRANSPORT] = {
470 .encap = XFRM_MODE_TRANSPORT,
471 .family = AF_INET,
472 },
473 [XFRM_MODE_TUNNEL] = {
474 .encap = XFRM_MODE_TUNNEL,
475 .flags = XFRM_MODE_FLAG_TUNNEL,
476 .family = AF_INET,
477 },
478 [XFRM_MODE_IPTFS] = {
479 .encap = XFRM_MODE_IPTFS,
480 .flags = XFRM_MODE_FLAG_TUNNEL,
481 .family = AF_INET,
482 },
483 };
484
485 static const struct xfrm_mode xfrm6_mode_map[XFRM_MODE_MAX] = {
486 [XFRM_MODE_BEET] = {
487 .encap = XFRM_MODE_BEET,
488 .flags = XFRM_MODE_FLAG_TUNNEL,
489 .family = AF_INET6,
490 },
491 [XFRM_MODE_ROUTEOPTIMIZATION] = {
492 .encap = XFRM_MODE_ROUTEOPTIMIZATION,
493 .family = AF_INET6,
494 },
495 [XFRM_MODE_TRANSPORT] = {
496 .encap = XFRM_MODE_TRANSPORT,
497 .family = AF_INET6,
498 },
499 [XFRM_MODE_TUNNEL] = {
500 .encap = XFRM_MODE_TUNNEL,
501 .flags = XFRM_MODE_FLAG_TUNNEL,
502 .family = AF_INET6,
503 },
504 [XFRM_MODE_IPTFS] = {
505 .encap = XFRM_MODE_IPTFS,
506 .flags = XFRM_MODE_FLAG_TUNNEL,
507 .family = AF_INET6,
508 },
509 };
510
511 static const struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
512 {
513 const struct xfrm_mode *mode;
514
515 if (unlikely(encap >= XFRM_MODE_MAX))
516 return NULL;
517
518 switch (family) {
519 case AF_INET:
520 mode = &xfrm4_mode_map[encap];
521 if (mode->family == family)
522 return mode;
523 break;
524 case AF_INET6:
525 mode = &xfrm6_mode_map[encap];
526 if (mode->family == family)
527 return mode;
528 break;
529 default:
530 break;
531 }
532
533 return NULL;
534 }
535
536 static const struct xfrm_mode_cbs __rcu *xfrm_mode_cbs_map[XFRM_MODE_MAX];
537 static DEFINE_SPINLOCK(xfrm_mode_cbs_map_lock);
538
539 int xfrm_register_mode_cbs(u8 mode, const struct xfrm_mode_cbs *mode_cbs)
540 {
541 if (mode >= XFRM_MODE_MAX)
542 return -EINVAL;
543
544 spin_lock_bh(&xfrm_mode_cbs_map_lock);
545 rcu_assign_pointer(xfrm_mode_cbs_map[mode], mode_cbs);
546 spin_unlock_bh(&xfrm_mode_cbs_map_lock);
547
548 return 0;
549 }
550 EXPORT_SYMBOL(xfrm_register_mode_cbs);
551
552 void xfrm_unregister_mode_cbs(u8 mode)
553 {
554 if (mode >= XFRM_MODE_MAX)
555 return;
556
557 spin_lock_bh(&xfrm_mode_cbs_map_lock);
558 RCU_INIT_POINTER(xfrm_mode_cbs_map[mode], NULL);
559 spin_unlock_bh(&xfrm_mode_cbs_map_lock);
560 synchronize_rcu();
561 }
562 EXPORT_SYMBOL(xfrm_unregister_mode_cbs);
563
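/* Return the registered callbacks for @mode, taking a reference on the
 * owning module.  For IPTFS a missing provider triggers a single
 * request_module("xfrm-iptfs") attempt before giving up.
 */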
564 static const struct xfrm_mode_cbs *xfrm_get_mode_cbs(u8 mode)
565 {
566 const struct xfrm_mode_cbs *cbs;
567 bool try_load = true;
568
569 if (mode >= XFRM_MODE_MAX)
570 return NULL;
571
572 retry:
573 rcu_read_lock();
574
575 cbs = rcu_dereference(xfrm_mode_cbs_map[mode]);
576 if (cbs && !try_module_get(cbs->owner))
577 cbs = NULL;
578
579 rcu_read_unlock();
580
581 if (mode == XFRM_MODE_IPTFS && !cbs && try_load) {
582 request_module("xfrm-iptfs");
583 try_load = false;
584 goto retry;
585 }
586
587 return cbs;
588 }
589
590 void xfrm_state_free(struct xfrm_state *x)
591 {
592 kmem_cache_free(xfrm_state_cache, x);
593 }
594 EXPORT_SYMBOL(xfrm_state_free);
595
596 static void ___xfrm_state_destroy(struct xfrm_state *x)
597 {
598 if (x->mode_cbs && x->mode_cbs->destroy_state)
599 x->mode_cbs->destroy_state(x);
600 hrtimer_cancel(&x->mtimer);
601 timer_delete_sync(&x->rtimer);
602 kfree_sensitive(x->aead);
603 kfree_sensitive(x->aalg);
604 kfree_sensitive(x->ealg);
605 kfree(x->calg);
606 kfree(x->encap);
607 kfree(x->coaddr);
608 kfree(x->replay_esn);
609 kfree(x->preplay_esn);
610 if (x->type) {
611 x->type->destructor(x);
612 xfrm_put_type(x->type);
613 }
614 if (x->xfrag.page)
615 put_page(x->xfrag.page);
616 xfrm_dev_state_free(x);
617 security_xfrm_state_free(x);
618 xfrm_state_free(x);
619 }
620
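/* Deferred destruction: states queued on xfrm_state_gc_list are torn
 * down here, after synchronize_rcu(), so that lockless readers that may
 * still hold a pointer obtained from the hash tables have finished.
 */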
621 static void xfrm_state_gc_task(struct work_struct *work)
622 {
623 struct xfrm_state *x;
624 struct hlist_node *tmp;
625 struct hlist_head gc_list;
626
627 spin_lock_bh(&xfrm_state_gc_lock);
628 hlist_move_list(&xfrm_state_gc_list, &gc_list);
629 spin_unlock_bh(&xfrm_state_gc_lock);
630
631 synchronize_rcu();
632
633 hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
634 ___xfrm_state_destroy(x);
635 }
636
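/* Lifetime timer.  Checks the hard/soft add- and use-time limits,
 * signals soft expiry to the key managers (km_state_expired() with
 * hard == 0), deletes the state on hard expiry, and otherwise re-arms
 * the hrtimer for the nearest remaining deadline.
 */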
637 static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
638 {
639 struct xfrm_state *x = container_of(me, struct xfrm_state, mtimer);
640 enum hrtimer_restart ret = HRTIMER_NORESTART;
641 time64_t now = ktime_get_real_seconds();
642 time64_t next = TIME64_MAX;
643 int warn = 0;
644 int err = 0;
645
646 spin_lock(&x->lock);
647 xfrm_dev_state_update_stats(x);
648
649 if (x->km.state == XFRM_STATE_DEAD)
650 goto out;
651 if (x->km.state == XFRM_STATE_EXPIRED)
652 goto expired;
653 if (x->lft.hard_add_expires_seconds) {
654 time64_t tmo = x->lft.hard_add_expires_seconds +
655 x->curlft.add_time - now;
656 if (tmo <= 0) {
657 if (x->xflags & XFRM_SOFT_EXPIRE) {
658 /* hard expire reached without a soft expire first?!
659 * Setting a new date could trigger this.
660 * Workaround: fix up x->curlft.add_time below:
661 */
662 x->curlft.add_time = now - x->saved_tmo - 1;
663 tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
664 } else
665 goto expired;
666 }
667 if (tmo < next)
668 next = tmo;
669 }
670 if (x->lft.hard_use_expires_seconds) {
671 time64_t tmo = x->lft.hard_use_expires_seconds +
672 (READ_ONCE(x->curlft.use_time) ? : now) - now;
673 if (tmo <= 0)
674 goto expired;
675 if (tmo < next)
676 next = tmo;
677 }
678 if (x->km.dying)
679 goto resched;
680 if (x->lft.soft_add_expires_seconds) {
681 time64_t tmo = x->lft.soft_add_expires_seconds +
682 x->curlft.add_time - now;
683 if (tmo <= 0) {
684 warn = 1;
685 x->xflags &= ~XFRM_SOFT_EXPIRE;
686 } else if (tmo < next) {
687 next = tmo;
688 x->xflags |= XFRM_SOFT_EXPIRE;
689 x->saved_tmo = tmo;
690 }
691 }
692 if (x->lft.soft_use_expires_seconds) {
693 time64_t tmo = x->lft.soft_use_expires_seconds +
694 (READ_ONCE(x->curlft.use_time) ? : now) - now;
695 if (tmo <= 0)
696 warn = 1;
697 else if (tmo < next)
698 next = tmo;
699 }
700
701 x->km.dying = warn;
702 if (warn)
703 km_state_expired(x, 0, 0);
704 resched:
705 if (next != TIME64_MAX) {
706 hrtimer_forward_now(&x->mtimer, ktime_set(next, 0));
707 ret = HRTIMER_RESTART;
708 }
709
710 goto out;
711
712 expired:
713 if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0)
714 x->km.state = XFRM_STATE_EXPIRED;
715
716 err = __xfrm_state_delete(x);
717 if (!err)
718 km_state_expired(x, 1, 0);
719
720 xfrm_audit_state_delete(x, err ? 0 : 1, true);
721
722 out:
723 spin_unlock(&x->lock);
724 return ret;
725 }
726
727 static void xfrm_replay_timer_handler(struct timer_list *t);
728
729 struct xfrm_state *xfrm_state_alloc(struct net *net)
730 {
731 struct xfrm_state *x;
732
733 x = kmem_cache_zalloc(xfrm_state_cache, GFP_ATOMIC);
734
735 if (x) {
736 write_pnet(&x->xs_net, net);
737 refcount_set(&x->refcnt, 1);
738 atomic_set(&x->tunnel_users, 0);
739 INIT_LIST_HEAD(&x->km.all);
740 INIT_HLIST_NODE(&x->state_cache);
741 INIT_HLIST_NODE(&x->bydst);
742 INIT_HLIST_NODE(&x->bysrc);
743 INIT_HLIST_NODE(&x->byspi);
744 INIT_HLIST_NODE(&x->byseq);
745 hrtimer_setup(&x->mtimer, xfrm_timer_handler, CLOCK_BOOTTIME,
746 HRTIMER_MODE_ABS_SOFT);
747 timer_setup(&x->rtimer, xfrm_replay_timer_handler, 0);
748 x->curlft.add_time = ktime_get_real_seconds();
749 x->lft.soft_byte_limit = XFRM_INF;
750 x->lft.soft_packet_limit = XFRM_INF;
751 x->lft.hard_byte_limit = XFRM_INF;
752 x->lft.hard_packet_limit = XFRM_INF;
753 x->replay_maxage = 0;
754 x->replay_maxdiff = 0;
755 x->pcpu_num = UINT_MAX;
756 spin_lock_init(&x->lock);
757 x->mode_data = NULL;
758 }
759 return x;
760 }
761 EXPORT_SYMBOL(xfrm_state_alloc);
762
763 #ifdef CONFIG_XFRM_OFFLOAD
764 void xfrm_dev_state_delete(struct xfrm_state *x)
765 {
766 struct xfrm_dev_offload *xso = &x->xso;
767 struct net_device *dev = READ_ONCE(xso->dev);
768
769 if (dev) {
770 dev->xfrmdev_ops->xdo_dev_state_delete(dev, x);
771 spin_lock_bh(&xfrm_state_dev_gc_lock);
772 hlist_add_head(&x->dev_gclist, &xfrm_state_dev_gc_list);
773 spin_unlock_bh(&xfrm_state_dev_gc_lock);
774 }
775 }
776 EXPORT_SYMBOL_GPL(xfrm_dev_state_delete);
777
778 void xfrm_dev_state_free(struct xfrm_state *x)
779 {
780 struct xfrm_dev_offload *xso = &x->xso;
781 struct net_device *dev = READ_ONCE(xso->dev);
782
783 xfrm_unset_type_offload(x);
784
785 if (dev && dev->xfrmdev_ops) {
786 spin_lock_bh(&xfrm_state_dev_gc_lock);
787 if (!hlist_unhashed(&x->dev_gclist))
788 hlist_del(&x->dev_gclist);
789 spin_unlock_bh(&xfrm_state_dev_gc_lock);
790
791 if (dev->xfrmdev_ops->xdo_dev_state_free)
792 dev->xfrmdev_ops->xdo_dev_state_free(dev, x);
793 WRITE_ONCE(xso->dev, NULL);
794 xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
795 netdev_put(dev, &xso->dev_tracker);
796 }
797 }
798 #endif
799
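/* Teardown of a DEAD state: either destroy it synchronously after an
 * explicit synchronize_rcu(), or queue it on the GC list and let
 * xfrm_state_gc_task() handle the grace-period wait.
 */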
800 void __xfrm_state_destroy(struct xfrm_state *x, bool sync)
801 {
802 WARN_ON(x->km.state != XFRM_STATE_DEAD);
803
804 if (sync) {
805 synchronize_rcu();
806 ___xfrm_state_destroy(x);
807 } else {
808 spin_lock_bh(&xfrm_state_gc_lock);
809 hlist_add_head(&x->gclist, &xfrm_state_gc_list);
810 spin_unlock_bh(&xfrm_state_gc_lock);
811 schedule_work(&xfrm_state_gc_work);
812 }
813 }
814 EXPORT_SYMBOL(__xfrm_state_destroy);
815
816 int __xfrm_state_delete(struct xfrm_state *x)
817 {
818 struct net *net = xs_net(x);
819 int err = -ESRCH;
820
821 if (x->km.state != XFRM_STATE_DEAD) {
822 x->km.state = XFRM_STATE_DEAD;
823
824 spin_lock(&net->xfrm.xfrm_state_lock);
825 list_del(&x->km.all);
826 hlist_del_rcu(&x->bydst);
827 hlist_del_rcu(&x->bysrc);
828 if (x->km.seq)
829 hlist_del_rcu(&x->byseq);
830 if (!hlist_unhashed(&x->state_cache))
831 hlist_del_rcu(&x->state_cache);
832 if (!hlist_unhashed(&x->state_cache_input))
833 hlist_del_rcu(&x->state_cache_input);
834
835 if (x->id.spi)
836 hlist_del_rcu(&x->byspi);
837 net->xfrm.state_num--;
838 xfrm_nat_keepalive_state_updated(x);
839 spin_unlock(&net->xfrm.xfrm_state_lock);
840
841 xfrm_dev_state_delete(x);
842
843 /* All xfrm_state objects are created by xfrm_state_alloc.
844 * The xfrm_state_alloc call gives a reference, and that
845 * is what we are dropping here.
846 */
847 xfrm_state_put(x);
848 err = 0;
849 }
850
851 return err;
852 }
853 EXPORT_SYMBOL(__xfrm_state_delete);
854
855 int xfrm_state_delete(struct xfrm_state *x)
856 {
857 int err;
858
859 spin_lock_bh(&x->lock);
860 err = __xfrm_state_delete(x);
861 spin_unlock_bh(&x->lock);
862
863 return err;
864 }
865 EXPORT_SYMBOL(xfrm_state_delete);
866
867 #ifdef CONFIG_SECURITY_NETWORK_XFRM
868 static inline int
869 xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
870 {
871 int i, err = 0;
872
873 for (i = 0; i <= net->xfrm.state_hmask; i++) {
874 struct xfrm_state *x;
875
876 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
877 if (xfrm_id_proto_match(x->id.proto, proto) &&
878 (err = security_xfrm_state_delete(x)) != 0) {
879 xfrm_audit_state_delete(x, 0, task_valid);
880 return err;
881 }
882 }
883 }
884
885 return err;
886 }
887
888 static inline int
889 xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
890 {
891 int i, err = 0;
892
893 for (i = 0; i <= net->xfrm.state_hmask; i++) {
894 struct xfrm_state *x;
895 struct xfrm_dev_offload *xso;
896
897 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
898 xso = &x->xso;
899
900 if (xso->dev == dev &&
901 (err = security_xfrm_state_delete(x)) != 0) {
902 xfrm_audit_state_delete(x, 0, task_valid);
903 return err;
904 }
905 }
906 }
907
908 return err;
909 }
910 #else
911 static inline int
912 xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
913 {
914 return 0;
915 }
916
917 static inline int
918 xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
919 {
920 return 0;
921 }
922 #endif
923
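/* Flush all states matching @proto.  The bucket walk drops
 * xfrm_state_lock around each xfrm_state_delete() (which takes x->lock),
 * so after every deletion the bucket is rescanned from the top.
 */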
924 int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync)
925 {
926 int i, err = 0, cnt = 0;
927
928 spin_lock_bh(&net->xfrm.xfrm_state_lock);
929 err = xfrm_state_flush_secctx_check(net, proto, task_valid);
930 if (err)
931 goto out;
932
933 err = -ESRCH;
934 for (i = 0; i <= net->xfrm.state_hmask; i++) {
935 struct xfrm_state *x;
936 restart:
937 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
938 if (!xfrm_state_kern(x) &&
939 xfrm_id_proto_match(x->id.proto, proto)) {
940 xfrm_state_hold(x);
941 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
942
943 err = xfrm_state_delete(x);
944 xfrm_audit_state_delete(x, err ? 0 : 1,
945 task_valid);
946 if (sync)
947 xfrm_state_put_sync(x);
948 else
949 xfrm_state_put(x);
950 if (!err)
951 cnt++;
952
953 spin_lock_bh(&net->xfrm.xfrm_state_lock);
954 goto restart;
955 }
956 }
957 }
958 out:
959 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
960 if (cnt)
961 err = 0;
962
963 return err;
964 }
965 EXPORT_SYMBOL(xfrm_state_flush);
966
967 int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid)
968 {
969 struct xfrm_state *x;
970 struct hlist_node *tmp;
971 struct xfrm_dev_offload *xso;
972 int i, err = 0, cnt = 0;
973
974 spin_lock_bh(&net->xfrm.xfrm_state_lock);
975 err = xfrm_dev_state_flush_secctx_check(net, dev, task_valid);
976 if (err)
977 goto out;
978
979 err = -ESRCH;
980 for (i = 0; i <= net->xfrm.state_hmask; i++) {
981 restart:
982 hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
983 xso = &x->xso;
984
985 if (!xfrm_state_kern(x) && xso->dev == dev) {
986 xfrm_state_hold(x);
987 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
988
989 err = xfrm_state_delete(x);
990 xfrm_dev_state_free(x);
991
992 xfrm_audit_state_delete(x, err ? 0 : 1,
993 task_valid);
994 xfrm_state_put(x);
995 if (!err)
996 cnt++;
997
998 spin_lock_bh(&net->xfrm.xfrm_state_lock);
999 goto restart;
1000 }
1001 }
1002 }
1003 if (cnt)
1004 err = 0;
1005
1006 out:
1007 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1008
1009 spin_lock_bh(&xfrm_state_dev_gc_lock);
1010 restart_gc:
1011 hlist_for_each_entry_safe(x, tmp, &xfrm_state_dev_gc_list, dev_gclist) {
1012 xso = &x->xso;
1013
1014 if (xso->dev == dev) {
1015 spin_unlock_bh(&xfrm_state_dev_gc_lock);
1016 xfrm_dev_state_free(x);
1017 spin_lock_bh(&xfrm_state_dev_gc_lock);
1018 goto restart_gc;
1019 }
1020
1021 }
1022 spin_unlock_bh(&xfrm_state_dev_gc_lock);
1023
1024 xfrm_flush_gc();
1025
1026 return err;
1027 }
1028 EXPORT_SYMBOL(xfrm_dev_state_flush);
1029
1030 void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
1031 {
1032 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1033 si->sadcnt = net->xfrm.state_num;
1034 si->sadhcnt = net->xfrm.state_hmask + 1;
1035 si->sadhmcnt = xfrm_state_hashmax;
1036 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1037 }
1038 EXPORT_SYMBOL(xfrm_sad_getinfo);
1039
1040 static void
1041 __xfrm4_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
1042 {
1043 const struct flowi4 *fl4 = &fl->u.ip4;
1044
1045 sel->daddr.a4 = fl4->daddr;
1046 sel->saddr.a4 = fl4->saddr;
1047 sel->dport = xfrm_flowi_dport(fl, &fl4->uli);
1048 sel->dport_mask = htons(0xffff);
1049 sel->sport = xfrm_flowi_sport(fl, &fl4->uli);
1050 sel->sport_mask = htons(0xffff);
1051 sel->family = AF_INET;
1052 sel->prefixlen_d = 32;
1053 sel->prefixlen_s = 32;
1054 sel->proto = fl4->flowi4_proto;
1055 sel->ifindex = fl4->flowi4_oif;
1056 }
1057
1058 static void
1059 __xfrm6_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
1060 {
1061 const struct flowi6 *fl6 = &fl->u.ip6;
1062
1063 /* Initialize temporary selector matching only to current session. */
1064 *(struct in6_addr *)&sel->daddr = fl6->daddr;
1065 *(struct in6_addr *)&sel->saddr = fl6->saddr;
1066 sel->dport = xfrm_flowi_dport(fl, &fl6->uli);
1067 sel->dport_mask = htons(0xffff);
1068 sel->sport = xfrm_flowi_sport(fl, &fl6->uli);
1069 sel->sport_mask = htons(0xffff);
1070 sel->family = AF_INET6;
1071 sel->prefixlen_d = 128;
1072 sel->prefixlen_s = 128;
1073 sel->proto = fl6->flowi6_proto;
1074 sel->ifindex = fl6->flowi6_oif;
1075 }
1076
1077 static void
1078 xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
1079 const struct xfrm_tmpl *tmpl,
1080 const xfrm_address_t *daddr, const xfrm_address_t *saddr,
1081 unsigned short family)
1082 {
1083 switch (family) {
1084 case AF_INET:
1085 __xfrm4_init_tempsel(&x->sel, fl);
1086 break;
1087 case AF_INET6:
1088 __xfrm6_init_tempsel(&x->sel, fl);
1089 break;
1090 }
1091
1092 x->id = tmpl->id;
1093
1094 switch (tmpl->encap_family) {
1095 case AF_INET:
1096 if (x->id.daddr.a4 == 0)
1097 x->id.daddr.a4 = daddr->a4;
1098 x->props.saddr = tmpl->saddr;
1099 if (x->props.saddr.a4 == 0)
1100 x->props.saddr.a4 = saddr->a4;
1101 break;
1102 case AF_INET6:
1103 if (ipv6_addr_any((struct in6_addr *)&x->id.daddr))
1104 memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr));
1105 memcpy(&x->props.saddr, &tmpl->saddr, sizeof(x->props.saddr));
1106 if (ipv6_addr_any((struct in6_addr *)&x->props.saddr))
1107 memcpy(&x->props.saddr, saddr, sizeof(x->props.saddr));
1108 break;
1109 }
1110
1111 x->props.mode = tmpl->mode;
1112 x->props.reqid = tmpl->reqid;
1113 x->props.family = tmpl->encap_family;
1114 }
1115
1116 struct xfrm_hash_state_ptrs {
1117 const struct hlist_head *bydst;
1118 const struct hlist_head *bysrc;
1119 const struct hlist_head *byspi;
1120 unsigned int hmask;
1121 };
1122
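/* Take a consistent lockless snapshot of the state hash table pointers
 * and mask.  The seqcount retry loop guards against xfrm_hash_resize()
 * swapping the tables while we read them.
 */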
1123 static void xfrm_hash_ptrs_get(const struct net *net, struct xfrm_hash_state_ptrs *ptrs)
1124 {
1125 unsigned int sequence;
1126
1127 do {
1128 sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
1129
1130 ptrs->bydst = xfrm_state_deref_check(net->xfrm.state_bydst, net);
1131 ptrs->bysrc = xfrm_state_deref_check(net->xfrm.state_bysrc, net);
1132 ptrs->byspi = xfrm_state_deref_check(net->xfrm.state_byspi, net);
1133 ptrs->hmask = net->xfrm.state_hmask;
1134 } while (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence));
1135 }
1136
1137 static struct xfrm_state *__xfrm_state_lookup_all(const struct xfrm_hash_state_ptrs *state_ptrs,
1138 u32 mark,
1139 const xfrm_address_t *daddr,
1140 __be32 spi, u8 proto,
1141 unsigned short family,
1142 struct xfrm_dev_offload *xdo)
1143 {
1144 unsigned int h = __xfrm_spi_hash(daddr, spi, proto, family, state_ptrs->hmask);
1145 struct xfrm_state *x;
1146
1147 hlist_for_each_entry_rcu(x, state_ptrs->byspi + h, byspi) {
1148 #ifdef CONFIG_XFRM_OFFLOAD
1149 if (xdo->type == XFRM_DEV_OFFLOAD_PACKET) {
1150 if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
1151 /* HW states are at the head of the list; there is
1152 * no need to iterate further.
1153 */
1154 break;
1155
1156 /* Packet offload: both policy and SA should
1157 * have the same device.
1158 */
1159 if (xdo->dev != x->xso.dev)
1160 continue;
1161 } else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
1162 /* Skip HW policy for SW lookups */
1163 continue;
1164 #endif
1165 if (x->props.family != family ||
1166 x->id.spi != spi ||
1167 x->id.proto != proto ||
1168 !xfrm_addr_equal(&x->id.daddr, daddr, family))
1169 continue;
1170
1171 if ((mark & x->mark.m) != x->mark.v)
1172 continue;
1173 if (!xfrm_state_hold_rcu(x))
1174 continue;
1175 return x;
1176 }
1177
1178 return NULL;
1179 }
1180
1181 static struct xfrm_state *__xfrm_state_lookup(const struct xfrm_hash_state_ptrs *state_ptrs,
1182 u32 mark,
1183 const xfrm_address_t *daddr,
1184 __be32 spi, u8 proto,
1185 unsigned short family)
1186 {
1187 unsigned int h = __xfrm_spi_hash(daddr, spi, proto, family, state_ptrs->hmask);
1188 struct xfrm_state *x;
1189
1190 hlist_for_each_entry_rcu(x, state_ptrs->byspi + h, byspi) {
1191 if (x->props.family != family ||
1192 x->id.spi != spi ||
1193 x->id.proto != proto ||
1194 !xfrm_addr_equal(&x->id.daddr, daddr, family))
1195 continue;
1196
1197 if ((mark & x->mark.m) != x->mark.v)
1198 continue;
1199 if (!xfrm_state_hold_rcu(x))
1200 continue;
1201 return x;
1202 }
1203
1204 return NULL;
1205 }
1206
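/* Inbound SA lookup with a per-CPU cache in front of the byspi hash:
 * probe this CPU's state_cache_input list first, fall back to
 * __xfrm_state_lookup(), and move a VALID result to the head of the
 * cache list for subsequent packets.
 */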
1207 struct xfrm_state *xfrm_input_state_lookup(struct net *net, u32 mark,
1208 const xfrm_address_t *daddr,
1209 __be32 spi, u8 proto,
1210 unsigned short family)
1211 {
1212 struct xfrm_hash_state_ptrs state_ptrs;
1213 struct hlist_head *state_cache_input;
1214 struct xfrm_state *x = NULL;
1215
1216 state_cache_input = raw_cpu_ptr(net->xfrm.state_cache_input);
1217
1218 rcu_read_lock();
1219 hlist_for_each_entry_rcu(x, state_cache_input, state_cache_input) {
1220 if (x->props.family != family ||
1221 x->id.spi != spi ||
1222 x->id.proto != proto ||
1223 !xfrm_addr_equal(&x->id.daddr, daddr, family))
1224 continue;
1225
1226 if ((mark & x->mark.m) != x->mark.v)
1227 continue;
1228 if (!xfrm_state_hold_rcu(x))
1229 continue;
1230 goto out;
1231 }
1232
1233 xfrm_hash_ptrs_get(net, &state_ptrs);
1234
1235 x = __xfrm_state_lookup(&state_ptrs, mark, daddr, spi, proto, family);
1236
1237 if (x && x->km.state == XFRM_STATE_VALID) {
1238 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1239 if (hlist_unhashed(&x->state_cache_input)) {
1240 hlist_add_head_rcu(&x->state_cache_input, state_cache_input);
1241 } else {
1242 hlist_del_rcu(&x->state_cache_input);
1243 hlist_add_head_rcu(&x->state_cache_input, state_cache_input);
1244 }
1245 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1246 }
1247
1248 out:
1249 rcu_read_unlock();
1250 return x;
1251 }
1252 EXPORT_SYMBOL(xfrm_input_state_lookup);
1253
1254 static struct xfrm_state *__xfrm_state_lookup_byaddr(const struct xfrm_hash_state_ptrs *state_ptrs,
1255 u32 mark,
1256 const xfrm_address_t *daddr,
1257 const xfrm_address_t *saddr,
1258 u8 proto, unsigned short family)
1259 {
1260 unsigned int h = __xfrm_src_hash(daddr, saddr, family, state_ptrs->hmask);
1261 struct xfrm_state *x;
1262
1263 hlist_for_each_entry_rcu(x, state_ptrs->bysrc + h, bysrc) {
1264 if (x->props.family != family ||
1265 x->id.proto != proto ||
1266 !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
1267 !xfrm_addr_equal(&x->props.saddr, saddr, family))
1268 continue;
1269
1270 if ((mark & x->mark.m) != x->mark.v)
1271 continue;
1272 if (!xfrm_state_hold_rcu(x))
1273 continue;
1274 return x;
1275 }
1276
1277 return NULL;
1278 }
1279
1280 static inline struct xfrm_state *
1281 __xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
1282 {
1283 struct xfrm_hash_state_ptrs state_ptrs;
1284 struct net *net = xs_net(x);
1285 u32 mark = x->mark.v & x->mark.m;
1286
1287 xfrm_hash_ptrs_get(net, &state_ptrs);
1288
1289 if (use_spi)
1290 return __xfrm_state_lookup(&state_ptrs, mark, &x->id.daddr,
1291 x->id.spi, x->id.proto, family);
1292 else
1293 return __xfrm_state_lookup_byaddr(&state_ptrs, mark,
1294 &x->id.daddr,
1295 &x->props.saddr,
1296 x->id.proto, family);
1297 }
1298
1299 static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
1300 {
1301 if (have_hash_collision &&
1302 (net->xfrm.state_hmask + 1) < xfrm_state_hashmax &&
1303 net->xfrm.state_num > net->xfrm.state_hmask)
1304 schedule_work(&net->xfrm.state_hash_work);
1305 }
1306
1307 static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
1308 const struct flowi *fl, unsigned short family,
1309 struct xfrm_state **best, int *acq_in_progress,
1310 int *error)
1311 {
1312 /* We need the cpu id just as a lookup key,
1313 * we don't require it to be stable.
1314 */
1315 unsigned int pcpu_id = get_cpu();
1316 put_cpu();
1317
1318 /* Resolution logic:
1319 * 1. There is a valid state with matching selector. Done.
1320 * 2. Valid state with inappropriate selector. Skip.
1321 *
1322 * Entering area of "sysdeps".
1323 *
1324 * 3. If state is not valid, selector is temporary, it selects
1325 * only session which triggered previous resolution. Key
1326 * manager will do something to install a state with proper
1327 * selector.
1328 */
1329 if (x->km.state == XFRM_STATE_VALID) {
1330 if ((x->sel.family &&
1331 (x->sel.family != family ||
1332 !xfrm_selector_match(&x->sel, fl, family))) ||
1333 !security_xfrm_state_pol_flow_match(x, pol,
1334 &fl->u.__fl_common))
1335 return;
1336
1337 if (x->pcpu_num != UINT_MAX && x->pcpu_num != pcpu_id)
1338 return;
1339
1340 if (!*best ||
1341 ((*best)->pcpu_num == UINT_MAX && x->pcpu_num == pcpu_id) ||
1342 (*best)->km.dying > x->km.dying ||
1343 ((*best)->km.dying == x->km.dying &&
1344 (*best)->curlft.add_time < x->curlft.add_time))
1345 *best = x;
1346 } else if (x->km.state == XFRM_STATE_ACQ) {
1347 if (!*best || x->pcpu_num == pcpu_id)
1348 *acq_in_progress = 1;
1349 } else if (x->km.state == XFRM_STATE_ERROR ||
1350 x->km.state == XFRM_STATE_EXPIRED) {
1351 if ((!x->sel.family ||
1352 (x->sel.family == family &&
1353 xfrm_selector_match(&x->sel, fl, family))) &&
1354 security_xfrm_state_pol_flow_match(x, pol,
1355 &fl->u.__fl_common))
1356 *error = -ESRCH;
1357 }
1358 }
1359
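/* Main output-path SA resolution.  Candidates are examined, in order,
 * from the policy's state cache, the bydst hash keyed on
 * (daddr, saddr, reqid) and the same hash with a wildcard source.  If
 * nothing usable is found and no acquire is in flight, a temporary
 * state is allocated, the key managers are queried via km_query(), and
 * on success it is installed as XFRM_STATE_ACQ.
 */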
1360 struct xfrm_state *
1361 xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
1362 const struct flowi *fl, struct xfrm_tmpl *tmpl,
1363 struct xfrm_policy *pol, int *err,
1364 unsigned short family, u32 if_id)
1365 {
1366 static xfrm_address_t saddr_wildcard = { };
1367 struct xfrm_hash_state_ptrs state_ptrs;
1368 struct net *net = xp_net(pol);
1369 unsigned int h, h_wildcard;
1370 struct xfrm_state *x, *x0, *to_put;
1371 int acquire_in_progress = 0;
1372 int error = 0;
1373 struct xfrm_state *best = NULL;
1374 u32 mark = pol->mark.v & pol->mark.m;
1375 unsigned short encap_family = tmpl->encap_family;
1376 unsigned int sequence;
1377 struct km_event c;
1378 unsigned int pcpu_id;
1379 bool cached = false;
1380
1381 /* We need the cpu id just as a lookup key,
1382 * we don't require it to be stable.
1383 */
1384 pcpu_id = get_cpu();
1385 put_cpu();
1386
1387 to_put = NULL;
1388
1389 sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);
1390
1391 rcu_read_lock();
1392 hlist_for_each_entry_rcu(x, &pol->state_cache_list, state_cache) {
1393 if (x->props.family == encap_family &&
1394 x->props.reqid == tmpl->reqid &&
1395 (mark & x->mark.m) == x->mark.v &&
1396 x->if_id == if_id &&
1397 !(x->props.flags & XFRM_STATE_WILDRECV) &&
1398 xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
1399 tmpl->mode == x->props.mode &&
1400 tmpl->id.proto == x->id.proto &&
1401 (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
1402 xfrm_state_look_at(pol, x, fl, encap_family,
1403 &best, &acquire_in_progress, &error);
1404 }
1405
1406 if (best)
1407 goto cached;
1408
1409 hlist_for_each_entry_rcu(x, &pol->state_cache_list, state_cache) {
1410 if (x->props.family == encap_family &&
1411 x->props.reqid == tmpl->reqid &&
1412 (mark & x->mark.m) == x->mark.v &&
1413 x->if_id == if_id &&
1414 !(x->props.flags & XFRM_STATE_WILDRECV) &&
1415 xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
1416 tmpl->mode == x->props.mode &&
1417 tmpl->id.proto == x->id.proto &&
1418 (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
1419 xfrm_state_look_at(pol, x, fl, family,
1420 &best, &acquire_in_progress, &error);
1421 }
1422
1423 cached:
1424 cached = true;
1425 if (best)
1426 goto found;
1427 else if (error)
1428 best = NULL;
1429 else if (acquire_in_progress) /* XXX: acquire_in_progress should not happen */
1430 WARN_ON(1);
1431
1432 xfrm_hash_ptrs_get(net, &state_ptrs);
1433
1434 h = __xfrm_dst_hash(daddr, saddr, tmpl->reqid, encap_family, state_ptrs.hmask);
1435 hlist_for_each_entry_rcu(x, state_ptrs.bydst + h, bydst) {
1436 #ifdef CONFIG_XFRM_OFFLOAD
1437 if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
1438 if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
1439 /* HW states are at the head of the list; there is
1440 * no need to iterate further.
1441 */
1442 break;
1443
1444 /* Packet offload: both policy and SA should
1445 * have the same device.
1446 */
1447 if (pol->xdo.dev != x->xso.dev)
1448 continue;
1449 } else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
1450 /* Skip HW policy for SW lookups */
1451 continue;
1452 #endif
1453 if (x->props.family == encap_family &&
1454 x->props.reqid == tmpl->reqid &&
1455 (mark & x->mark.m) == x->mark.v &&
1456 x->if_id == if_id &&
1457 !(x->props.flags & XFRM_STATE_WILDRECV) &&
1458 xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
1459 tmpl->mode == x->props.mode &&
1460 tmpl->id.proto == x->id.proto &&
1461 (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
1462 xfrm_state_look_at(pol, x, fl, family,
1463 &best, &acquire_in_progress, &error);
1464 }
1465 if (best || acquire_in_progress)
1466 goto found;
1467
1468 h_wildcard = __xfrm_dst_hash(daddr, &saddr_wildcard, tmpl->reqid,
1469 encap_family, state_ptrs.hmask);
1470 hlist_for_each_entry_rcu(x, state_ptrs.bydst + h_wildcard, bydst) {
1471 #ifdef CONFIG_XFRM_OFFLOAD
1472 if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
1473 if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
1474 /* HW states are at the head of the list; there is
1475 * no need to iterate further.
1476 */
1477 break;
1478
1479 /* Packet offload: both policy and SA should
1480 * have the same device.
1481 */
1482 if (pol->xdo.dev != x->xso.dev)
1483 continue;
1484 } else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
1485 /* Skip HW policy for SW lookups */
1486 continue;
1487 #endif
1488 if (x->props.family == encap_family &&
1489 x->props.reqid == tmpl->reqid &&
1490 (mark & x->mark.m) == x->mark.v &&
1491 x->if_id == if_id &&
1492 !(x->props.flags & XFRM_STATE_WILDRECV) &&
1493 xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
1494 tmpl->mode == x->props.mode &&
1495 tmpl->id.proto == x->id.proto &&
1496 (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
1497 xfrm_state_look_at(pol, x, fl, family,
1498 &best, &acquire_in_progress, &error);
1499 }
1500
1501 found:
1502 if (!(pol->flags & XFRM_POLICY_CPU_ACQUIRE) ||
1503 (best && (best->pcpu_num == pcpu_id)))
1504 x = best;
1505
1506 if (!x && !error && !acquire_in_progress) {
1507 if (tmpl->id.spi &&
1508 (x0 = __xfrm_state_lookup_all(&state_ptrs, mark, daddr,
1509 tmpl->id.spi, tmpl->id.proto,
1510 encap_family,
1511 &pol->xdo)) != NULL) {
1512 to_put = x0;
1513 error = -EEXIST;
1514 goto out;
1515 }
1516
1517 c.net = net;
1518 /* If the KMs have no listeners (yet...), avoid allocating an SA
1519 * for each and every packet - garbage collection might not
1520 * handle the flood.
1521 */
1522 if (!km_is_alive(&c)) {
1523 error = -ESRCH;
1524 goto out;
1525 }
1526
1527 x = xfrm_state_alloc(net);
1528 if (x == NULL) {
1529 error = -ENOMEM;
1530 goto out;
1531 }
1532 /* Initialize temporary state matching only
1533 * to current session. */
1534 xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
1535 memcpy(&x->mark, &pol->mark, sizeof(x->mark));
1536 x->if_id = if_id;
1537 if ((pol->flags & XFRM_POLICY_CPU_ACQUIRE) && best)
1538 x->pcpu_num = pcpu_id;
1539
1540 error = security_xfrm_state_alloc_acquire(x, pol->security, fl->flowi_secid);
1541 if (error) {
1542 x->km.state = XFRM_STATE_DEAD;
1543 to_put = x;
1544 x = NULL;
1545 goto out;
1546 }
1547 #ifdef CONFIG_XFRM_OFFLOAD
1548 if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
1549 struct xfrm_dev_offload *xdo = &pol->xdo;
1550 struct xfrm_dev_offload *xso = &x->xso;
1551 struct net_device *dev = xdo->dev;
1552
1553 xso->type = XFRM_DEV_OFFLOAD_PACKET;
1554 xso->dir = xdo->dir;
1555 xso->dev = dev;
1556 xso->flags = XFRM_DEV_OFFLOAD_FLAG_ACQ;
1557 netdev_hold(dev, &xso->dev_tracker, GFP_ATOMIC);
1558 error = dev->xfrmdev_ops->xdo_dev_state_add(dev, x,
1559 NULL);
1560 if (error) {
1561 xso->dir = 0;
1562 netdev_put(dev, &xso->dev_tracker);
1563 xso->dev = NULL;
1564 xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
1565 x->km.state = XFRM_STATE_DEAD;
1566 to_put = x;
1567 x = NULL;
1568 goto out;
1569 }
1570 }
1571 #endif
1572 if (km_query(x, tmpl, pol) == 0) {
1573 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1574 x->km.state = XFRM_STATE_ACQ;
1575 x->dir = XFRM_SA_DIR_OUT;
1576 list_add(&x->km.all, &net->xfrm.state_all);
1577 h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
1578 XFRM_STATE_INSERT(bydst, &x->bydst,
1579 net->xfrm.state_bydst + h,
1580 x->xso.type);
1581 h = xfrm_src_hash(net, daddr, saddr, encap_family);
1582 XFRM_STATE_INSERT(bysrc, &x->bysrc,
1583 net->xfrm.state_bysrc + h,
1584 x->xso.type);
1585 INIT_HLIST_NODE(&x->state_cache);
1586 if (x->id.spi) {
1587 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
1588 XFRM_STATE_INSERT(byspi, &x->byspi,
1589 net->xfrm.state_byspi + h,
1590 x->xso.type);
1591 }
1592 if (x->km.seq) {
1593 h = xfrm_seq_hash(net, x->km.seq);
1594 XFRM_STATE_INSERT(byseq, &x->byseq,
1595 net->xfrm.state_byseq + h,
1596 x->xso.type);
1597 }
1598 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
1599 hrtimer_start(&x->mtimer,
1600 ktime_set(net->xfrm.sysctl_acq_expires, 0),
1601 HRTIMER_MODE_REL_SOFT);
1602 net->xfrm.state_num++;
1603 xfrm_hash_grow_check(net, x->bydst.next != NULL);
1604 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1605 } else {
1606 #ifdef CONFIG_XFRM_OFFLOAD
1607 struct xfrm_dev_offload *xso = &x->xso;
1608
1609 if (xso->type == XFRM_DEV_OFFLOAD_PACKET) {
1610 xfrm_dev_state_delete(x);
1611 xfrm_dev_state_free(x);
1612 }
1613 #endif
1614 x->km.state = XFRM_STATE_DEAD;
1615 to_put = x;
1616 x = NULL;
1617 error = -ESRCH;
1618 }
1619
1620 /* Use the already installed 'fallback' while the CPU-specific
1621 * SA acquire is handled. */
1622 if (best)
1623 x = best;
1624 }
1625 out:
1626 if (x) {
1627 if (!xfrm_state_hold_rcu(x)) {
1628 *err = -EAGAIN;
1629 x = NULL;
1630 }
1631 } else {
1632 *err = acquire_in_progress ? -EAGAIN : error;
1633 }
1634
1635 if (x && x->km.state == XFRM_STATE_VALID && !cached &&
1636 (!(pol->flags & XFRM_POLICY_CPU_ACQUIRE) || x->pcpu_num == pcpu_id)) {
1637 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1638 if (hlist_unhashed(&x->state_cache))
1639 hlist_add_head_rcu(&x->state_cache, &pol->state_cache_list);
1640 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1641 }
1642
1643 rcu_read_unlock();
1644 if (to_put)
1645 xfrm_state_put(to_put);
1646
1647 if (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence)) {
1648 *err = -EAGAIN;
1649 if (x) {
1650 xfrm_state_put(x);
1651 x = NULL;
1652 }
1653 }
1654
1655 return x;
1656 }
1657
1658 struct xfrm_state *
1659 xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
1660 xfrm_address_t *daddr, xfrm_address_t *saddr,
1661 unsigned short family, u8 mode, u8 proto, u32 reqid)
1662 {
1663 unsigned int h;
1664 struct xfrm_state *rx = NULL, *x = NULL;
1665
1666 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1667 h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
1668 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1669 if (x->props.family == family &&
1670 x->props.reqid == reqid &&
1671 (mark & x->mark.m) == x->mark.v &&
1672 x->if_id == if_id &&
1673 !(x->props.flags & XFRM_STATE_WILDRECV) &&
1674 xfrm_state_addr_check(x, daddr, saddr, family) &&
1675 mode == x->props.mode &&
1676 proto == x->id.proto &&
1677 x->km.state == XFRM_STATE_VALID) {
1678 rx = x;
1679 break;
1680 }
1681 }
1682
1683 if (rx)
1684 xfrm_state_hold(rx);
1685 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1686
1687
1688 return rx;
1689 }
1690 EXPORT_SYMBOL(xfrm_stateonly_find);
1691
1692 struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
1693 unsigned short family)
1694 {
1695 struct xfrm_state *x;
1696 struct xfrm_state_walk *w;
1697
1698 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1699 list_for_each_entry(w, &net->xfrm.state_all, all) {
1700 x = container_of(w, struct xfrm_state, km);
1701 if (x->props.family != family ||
1702 x->id.spi != spi)
1703 continue;
1704
1705 xfrm_state_hold(x);
1706 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1707 return x;
1708 }
1709 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1710 return NULL;
1711 }
1712 EXPORT_SYMBOL(xfrm_state_lookup_byspi);
1713
1714 static void __xfrm_state_insert(struct xfrm_state *x)
1715 {
1716 struct net *net = xs_net(x);
1717 unsigned int h;
1718
1719 list_add(&x->km.all, &net->xfrm.state_all);
1720
1721 /* Sanitize mark before store */
1722 x->mark.v &= x->mark.m;
1723
1724 h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
1725 x->props.reqid, x->props.family);
1726 XFRM_STATE_INSERT(bydst, &x->bydst, net->xfrm.state_bydst + h,
1727 x->xso.type);
1728
1729 h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family);
1730 XFRM_STATE_INSERT(bysrc, &x->bysrc, net->xfrm.state_bysrc + h,
1731 x->xso.type);
1732
1733 if (x->id.spi) {
1734 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto,
1735 x->props.family);
1736
1737 XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h,
1738 x->xso.type);
1739 }
1740
1741 if (x->km.seq) {
1742 h = xfrm_seq_hash(net, x->km.seq);
1743
1744 XFRM_STATE_INSERT(byseq, &x->byseq, net->xfrm.state_byseq + h,
1745 x->xso.type);
1746 }
1747
1748 hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
1749 if (x->replay_maxage)
1750 mod_timer(&x->rtimer, jiffies + x->replay_maxage);
1751
1752 net->xfrm.state_num++;
1753
1754 xfrm_hash_grow_check(net, x->bydst.next != NULL);
1755 xfrm_nat_keepalive_state_updated(x);
1756 }
1757
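/* Bump x->genid on every existing state that collides with @xnew on
 * (family, reqid, if_id, pcpu_num, mark, daddr, saddr), so that
 * consumers of x->genid can detect that an equivalent state has been
 * inserted.
 */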
1758 /* net->xfrm.xfrm_state_lock is held */
1759 static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
1760 {
1761 struct net *net = xs_net(xnew);
1762 unsigned short family = xnew->props.family;
1763 u32 reqid = xnew->props.reqid;
1764 struct xfrm_state *x;
1765 unsigned int h;
1766 u32 mark = xnew->mark.v & xnew->mark.m;
1767 u32 if_id = xnew->if_id;
1768 u32 cpu_id = xnew->pcpu_num;
1769
1770 h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
1771 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1772 if (x->props.family == family &&
1773 x->props.reqid == reqid &&
1774 x->if_id == if_id &&
1775 x->pcpu_num == cpu_id &&
1776 (mark & x->mark.m) == x->mark.v &&
1777 xfrm_addr_equal(&x->id.daddr, &xnew->id.daddr, family) &&
1778 xfrm_addr_equal(&x->props.saddr, &xnew->props.saddr, family))
1779 x->genid++;
1780 }
1781 }
1782
1783 void xfrm_state_insert(struct xfrm_state *x)
1784 {
1785 struct net *net = xs_net(x);
1786
1787 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1788 __xfrm_state_bump_genids(x);
1789 __xfrm_state_insert(x);
1790 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1791 }
1792 EXPORT_SYMBOL(xfrm_state_insert);
1793
1794 /* net->xfrm.xfrm_state_lock is held */
1795 static struct xfrm_state *__find_acq_core(struct net *net,
1796 const struct xfrm_mark *m,
1797 unsigned short family, u8 mode,
1798 u32 reqid, u32 if_id, u32 pcpu_num, u8 proto,
1799 const xfrm_address_t *daddr,
1800 const xfrm_address_t *saddr,
1801 int create)
1802 {
1803 unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
1804 struct xfrm_state *x;
1805 u32 mark = m->v & m->m;
1806
1807 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1808 if (x->props.reqid != reqid ||
1809 x->props.mode != mode ||
1810 x->props.family != family ||
1811 x->km.state != XFRM_STATE_ACQ ||
1812 x->id.spi != 0 ||
1813 x->id.proto != proto ||
1814 (mark & x->mark.m) != x->mark.v ||
1815 x->pcpu_num != pcpu_num ||
1816 !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
1817 !xfrm_addr_equal(&x->props.saddr, saddr, family))
1818 continue;
1819
1820 xfrm_state_hold(x);
1821 return x;
1822 }
1823
1824 if (!create)
1825 return NULL;
1826
1827 x = xfrm_state_alloc(net);
1828 if (likely(x)) {
1829 switch (family) {
1830 case AF_INET:
1831 x->sel.daddr.a4 = daddr->a4;
1832 x->sel.saddr.a4 = saddr->a4;
1833 x->sel.prefixlen_d = 32;
1834 x->sel.prefixlen_s = 32;
1835 x->props.saddr.a4 = saddr->a4;
1836 x->id.daddr.a4 = daddr->a4;
1837 break;
1838
1839 case AF_INET6:
1840 x->sel.daddr.in6 = daddr->in6;
1841 x->sel.saddr.in6 = saddr->in6;
1842 x->sel.prefixlen_d = 128;
1843 x->sel.prefixlen_s = 128;
1844 x->props.saddr.in6 = saddr->in6;
1845 x->id.daddr.in6 = daddr->in6;
1846 break;
1847 }
1848
1849 x->pcpu_num = pcpu_num;
1850 x->km.state = XFRM_STATE_ACQ;
1851 x->id.proto = proto;
1852 x->props.family = family;
1853 x->props.mode = mode;
1854 x->props.reqid = reqid;
1855 x->if_id = if_id;
1856 x->mark.v = m->v;
1857 x->mark.m = m->m;
1858 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
1859 xfrm_state_hold(x);
1860 hrtimer_start(&x->mtimer,
1861 ktime_set(net->xfrm.sysctl_acq_expires, 0),
1862 HRTIMER_MODE_REL_SOFT);
1863 list_add(&x->km.all, &net->xfrm.state_all);
1864 XFRM_STATE_INSERT(bydst, &x->bydst, net->xfrm.state_bydst + h,
1865 x->xso.type);
1866 h = xfrm_src_hash(net, daddr, saddr, family);
1867 XFRM_STATE_INSERT(bysrc, &x->bysrc, net->xfrm.state_bysrc + h,
1868 x->xso.type);
1869
1870 net->xfrm.state_num++;
1871
1872 xfrm_hash_grow_check(net, x->bydst.next != NULL);
1873 }
1874
1875 return x;
1876 }
1877
1878 static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num);
1879
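/*
 * Install a fully specified SA.  Returns -EEXIST if an equivalent SA is
 * already present; otherwise any matching larval (ACQUIRE) state is looked
 * up, the new state is inserted, and the larval entry is deleted and
 * released once the state lock has been dropped.
 */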
1880 int xfrm_state_add(struct xfrm_state *x)
1881 {
1882 struct net *net = xs_net(x);
1883 struct xfrm_state *x1, *to_put;
1884 int family;
1885 int err;
1886 u32 mark = x->mark.v & x->mark.m;
1887 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
1888
1889 family = x->props.family;
1890
1891 to_put = NULL;
1892
1893 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1894
1895 x1 = __xfrm_state_locate(x, use_spi, family);
1896 if (x1) {
1897 to_put = x1;
1898 x1 = NULL;
1899 err = -EEXIST;
1900 goto out;
1901 }
1902
1903 if (use_spi && x->km.seq) {
1904 x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq, x->pcpu_num);
1905 if (x1 && ((x1->id.proto != x->id.proto) ||
1906 !xfrm_addr_equal(&x1->id.daddr, &x->id.daddr, family))) {
1907 to_put = x1;
1908 x1 = NULL;
1909 }
1910 }
1911
1912 if (use_spi && !x1)
1913 x1 = __find_acq_core(net, &x->mark, family, x->props.mode,
1914 x->props.reqid, x->if_id, x->pcpu_num, x->id.proto,
1915 &x->id.daddr, &x->props.saddr, 0);
1916
1917 __xfrm_state_bump_genids(x);
1918 __xfrm_state_insert(x);
1919 err = 0;
1920
1921 out:
1922 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1923
1924 if (x1) {
1925 xfrm_state_delete(x1);
1926 xfrm_state_put(x1);
1927 }
1928
1929 if (to_put)
1930 xfrm_state_put(to_put);
1931
1932 return err;
1933 }
1934 EXPORT_SYMBOL(xfrm_state_add);
1935
1936 #ifdef CONFIG_XFRM_MIGRATE
1937 static inline int clone_security(struct xfrm_state *x, struct xfrm_sec_ctx *security)
1938 {
1939 struct xfrm_user_sec_ctx *uctx;
1940 int size = sizeof(*uctx) + security->ctx_len;
1941 int err;
1942
1943 uctx = kmalloc(size, GFP_KERNEL);
1944 if (!uctx)
1945 return -ENOMEM;
1946
1947 uctx->exttype = XFRMA_SEC_CTX;
1948 uctx->len = size;
1949 uctx->ctx_doi = security->ctx_doi;
1950 uctx->ctx_alg = security->ctx_alg;
1951 uctx->ctx_len = security->ctx_len;
1952 memcpy(uctx + 1, security->ctx_str, security->ctx_len);
1953 err = security_xfrm_state_alloc(x, uctx);
1954 kfree(uctx);
1955 if (err)
1956 return err;
1957
1958 return 0;
1959 }
1960
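/*
 * Deep-copy an SA for IPsec migration: duplicate identity, selector,
 * lifetimes, algorithms, encapsulation, security context and replay state,
 * then rewrite the family and endpoint addresses from the xfrm_migrate
 * entry.
 */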
1961 static struct xfrm_state *xfrm_state_clone_and_setup(struct xfrm_state *orig,
1962 struct xfrm_encap_tmpl *encap,
1963 struct xfrm_migrate *m)
1964 {
1965 struct net *net = xs_net(orig);
1966 struct xfrm_state *x = xfrm_state_alloc(net);
1967 if (!x)
1968 goto out;
1969
1970 memcpy(&x->id, &orig->id, sizeof(x->id));
1971 memcpy(&x->sel, &orig->sel, sizeof(x->sel));
1972 memcpy(&x->lft, &orig->lft, sizeof(x->lft));
1973 x->props.mode = orig->props.mode;
1974 x->props.replay_window = orig->props.replay_window;
1975 x->props.reqid = orig->props.reqid;
1976 x->props.family = orig->props.family;
1977 x->props.saddr = orig->props.saddr;
1978
1979 if (orig->aalg) {
1980 x->aalg = xfrm_algo_auth_clone(orig->aalg);
1981 if (!x->aalg)
1982 goto error;
1983 }
1984 x->props.aalgo = orig->props.aalgo;
1985
1986 if (orig->aead) {
1987 x->aead = xfrm_algo_aead_clone(orig->aead);
1988 x->geniv = orig->geniv;
1989 if (!x->aead)
1990 goto error;
1991 }
1992 if (orig->ealg) {
1993 x->ealg = xfrm_algo_clone(orig->ealg);
1994 if (!x->ealg)
1995 goto error;
1996 }
1997 x->props.ealgo = orig->props.ealgo;
1998
1999 if (orig->calg) {
2000 x->calg = xfrm_algo_clone(orig->calg);
2001 if (!x->calg)
2002 goto error;
2003 }
2004 x->props.calgo = orig->props.calgo;
2005
2006 if (encap || orig->encap) {
2007 if (encap)
2008 x->encap = kmemdup(encap, sizeof(*x->encap),
2009 GFP_KERNEL);
2010 else
2011 x->encap = kmemdup(orig->encap, sizeof(*x->encap),
2012 GFP_KERNEL);
2013
2014 if (!x->encap)
2015 goto error;
2016 }
2017
2018 if (orig->security)
2019 if (clone_security(x, orig->security))
2020 goto error;
2021
2022 if (orig->coaddr) {
2023 x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
2024 GFP_KERNEL);
2025 if (!x->coaddr)
2026 goto error;
2027 }
2028
2029 if (orig->replay_esn) {
2030 if (xfrm_replay_clone(x, orig))
2031 goto error;
2032 }
2033
2034 memcpy(&x->mark, &orig->mark, sizeof(x->mark));
2035 memcpy(&x->props.smark, &orig->props.smark, sizeof(x->props.smark));
2036
2037 x->props.flags = orig->props.flags;
2038 x->props.extra_flags = orig->props.extra_flags;
2039
2040 x->pcpu_num = orig->pcpu_num;
2041 x->if_id = orig->if_id;
2042 x->tfcpad = orig->tfcpad;
2043 x->replay_maxdiff = orig->replay_maxdiff;
2044 x->replay_maxage = orig->replay_maxage;
2045 memcpy(&x->curlft, &orig->curlft, sizeof(x->curlft));
2046 x->km.state = orig->km.state;
2047 x->km.seq = orig->km.seq;
2048 x->replay = orig->replay;
2049 x->preplay = orig->preplay;
2050 x->mapping_maxage = orig->mapping_maxage;
2051 x->lastused = orig->lastused;
2052 x->new_mapping = 0;
2053 x->new_mapping_sport = 0;
2054 x->dir = orig->dir;
2055
2056 x->mode_cbs = orig->mode_cbs;
2057 if (x->mode_cbs && x->mode_cbs->clone_state) {
2058 if (x->mode_cbs->clone_state(x, orig))
2059 goto error;
2060 }
2061
2062
2063 x->props.family = m->new_family;
2064 memcpy(&x->id.daddr, &m->new_daddr, sizeof(x->id.daddr));
2065 memcpy(&x->props.saddr, &m->new_saddr, sizeof(x->props.saddr));
2066
2067 return x;
2068
2069 error:
2070 xfrm_state_put(x);
2071 out:
2072 return NULL;
2073 }
2074
2075 struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net,
2076 u32 if_id)
2077 {
2078 unsigned int h;
2079 struct xfrm_state *x = NULL;
2080
2081 spin_lock_bh(&net->xfrm.xfrm_state_lock);
2082
2083 if (m->reqid) {
2084 h = xfrm_dst_hash(net, &m->old_daddr, &m->old_saddr,
2085 m->reqid, m->old_family);
2086 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
2087 if (x->props.mode != m->mode ||
2088 x->id.proto != m->proto)
2089 continue;
2090 if (m->reqid && x->props.reqid != m->reqid)
2091 continue;
2092 if (if_id != 0 && x->if_id != if_id)
2093 continue;
2094 if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
2095 m->old_family) ||
2096 !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
2097 m->old_family))
2098 continue;
2099 xfrm_state_hold(x);
2100 break;
2101 }
2102 } else {
2103 h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr,
2104 m->old_family);
2105 hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) {
2106 if (x->props.mode != m->mode ||
2107 x->id.proto != m->proto)
2108 continue;
2109 if (if_id != 0 && x->if_id != if_id)
2110 continue;
2111 if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
2112 m->old_family) ||
2113 !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
2114 m->old_family))
2115 continue;
2116 xfrm_state_hold(x);
2117 break;
2118 }
2119 }
2120
2121 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
2122
2123 return x;
2124 }
2125 EXPORT_SYMBOL(xfrm_migrate_state_find);
2126
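/*
 * Build and install the migrated copy of @x described by @m.  Hardware
 * offload is configured first when @xuo is supplied; if the destination
 * address is unchanged the clone is installed via xfrm_state_insert(),
 * otherwise via xfrm_state_add().
 */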
2127 struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
2128 struct xfrm_migrate *m,
2129 struct xfrm_encap_tmpl *encap,
2130 struct net *net,
2131 struct xfrm_user_offload *xuo,
2132 struct netlink_ext_ack *extack)
2133 {
2134 struct xfrm_state *xc;
2135
2136 xc = xfrm_state_clone_and_setup(x, encap, m);
2137 if (!xc)
2138 return NULL;
2139
2140 if (xfrm_init_state(xc) < 0)
2141 goto error;
2142
2143 /* configure the hardware if offload is requested */
2144 if (xuo && xfrm_dev_state_add(net, xc, xuo, extack))
2145 goto error;
2146
2147 /* add state */
2148 if (xfrm_addr_equal(&x->id.daddr, &m->new_daddr, m->new_family)) {
2149 /* care is needed when the destination address of the
2150 state is to be updated, as it is part of the lookup triplet */
2151 xfrm_state_insert(xc);
2152 } else {
2153 if (xfrm_state_add(xc) < 0)
2154 goto error;
2155 }
2156
2157 return xc;
2158 error:
2159 xfrm_state_put(xc);
2160 return NULL;
2161 }
2162 EXPORT_SYMBOL(xfrm_state_migrate);
2163 #endif
2164
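/*
 * Update an already installed SA in place: refresh its lifetimes,
 * encapsulation, care-of address, selector, smark and if_id and restart
 * its timer.  If the located state is still larval (ACQ) and the direction
 * matches, the new state simply replaces it.
 */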
2165 int xfrm_state_update(struct xfrm_state *x)
2166 {
2167 struct xfrm_state *x1, *to_put;
2168 int err;
2169 int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
2170 struct net *net = xs_net(x);
2171
2172 to_put = NULL;
2173
2174 spin_lock_bh(&net->xfrm.xfrm_state_lock);
2175 x1 = __xfrm_state_locate(x, use_spi, x->props.family);
2176
2177 err = -ESRCH;
2178 if (!x1)
2179 goto out;
2180
2181 if (xfrm_state_kern(x1)) {
2182 to_put = x1;
2183 err = -EEXIST;
2184 goto out;
2185 }
2186
2187 if (x1->km.state == XFRM_STATE_ACQ) {
2188 if (x->dir && x1->dir != x->dir)
2189 goto out;
2190
2191 __xfrm_state_insert(x);
2192 x = NULL;
2193 } else {
2194 if (x1->dir != x->dir)
2195 goto out;
2196 }
2197 err = 0;
2198
2199 out:
2200 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
2201
2202 if (to_put)
2203 xfrm_state_put(to_put);
2204
2205 if (err)
2206 return err;
2207
2208 if (!x) {
2209 xfrm_state_delete(x1);
2210 xfrm_state_put(x1);
2211 return 0;
2212 }
2213
2214 err = -EINVAL;
2215 spin_lock_bh(&x1->lock);
2216 if (likely(x1->km.state == XFRM_STATE_VALID)) {
2217 if (x->encap && x1->encap &&
2218 x->encap->encap_type == x1->encap->encap_type)
2219 memcpy(x1->encap, x->encap, sizeof(*x1->encap));
2220 else if (x->encap || x1->encap)
2221 goto fail;
2222
2223 if (x->coaddr && x1->coaddr) {
2224 memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
2225 }
2226 if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
2227 memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
2228 memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
2229 x1->km.dying = 0;
2230
2231 hrtimer_start(&x1->mtimer, ktime_set(1, 0),
2232 HRTIMER_MODE_REL_SOFT);
2233 if (READ_ONCE(x1->curlft.use_time))
2234 xfrm_state_check_expire(x1);
2235
2236 if (x->props.smark.m || x->props.smark.v || x->if_id) {
2237 spin_lock_bh(&net->xfrm.xfrm_state_lock);
2238
2239 if (x->props.smark.m || x->props.smark.v)
2240 x1->props.smark = x->props.smark;
2241
2242 if (x->if_id)
2243 x1->if_id = x->if_id;
2244
2245 __xfrm_state_bump_genids(x1);
2246 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
2247 }
2248
2249 err = 0;
2250 x->km.state = XFRM_STATE_DEAD;
2251 __xfrm_state_put(x);
2252 }
2253
2254 fail:
2255 spin_unlock_bh(&x1->lock);
2256
2257 xfrm_state_put(x1);
2258
2259 return err;
2260 }
2261 EXPORT_SYMBOL(xfrm_state_update);
2262
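/*
 * Check the SA's byte and packet counters against its lifetime limits.
 * Hitting a hard limit marks the state EXPIRED and fires its timer
 * immediately; crossing a soft limit notifies the key managers (once) so
 * that rekeying can begin.
 */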
2263 int xfrm_state_check_expire(struct xfrm_state *x)
2264 {
2265 xfrm_dev_state_update_stats(x);
2266
2267 if (!READ_ONCE(x->curlft.use_time))
2268 WRITE_ONCE(x->curlft.use_time, ktime_get_real_seconds());
2269
2270 if (x->curlft.bytes >= x->lft.hard_byte_limit ||
2271 x->curlft.packets >= x->lft.hard_packet_limit) {
2272 x->km.state = XFRM_STATE_EXPIRED;
2273 hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL_SOFT);
2274 return -EINVAL;
2275 }
2276
2277 if (!x->km.dying &&
2278 (x->curlft.bytes >= x->lft.soft_byte_limit ||
2279 x->curlft.packets >= x->lft.soft_packet_limit)) {
2280 x->km.dying = 1;
2281 km_state_expired(x, 0, 0);
2282 }
2283 return 0;
2284 }
2285 EXPORT_SYMBOL(xfrm_state_check_expire);
2286
2287 void xfrm_state_update_stats(struct net *net)
2288 {
2289 struct xfrm_state *x;
2290 int i;
2291
2292 spin_lock_bh(&net->xfrm.xfrm_state_lock);
2293 for (i = 0; i <= net->xfrm.state_hmask; i++) {
2294 hlist_for_each_entry(x, net->xfrm.state_bydst + i, bydst)
2295 xfrm_dev_state_update_stats(x);
2296 }
2297 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
2298 }
2299
2300 struct xfrm_state *
2301 xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
2302 u8 proto, unsigned short family)
2303 {
2304 struct xfrm_hash_state_ptrs state_ptrs;
2305 struct xfrm_state *x;
2306
2307 rcu_read_lock();
2308 xfrm_hash_ptrs_get(net, &state_ptrs);
2309
2310 x = __xfrm_state_lookup(&state_ptrs, mark, daddr, spi, proto, family);
2311 rcu_read_unlock();
2312 return x;
2313 }
2314 EXPORT_SYMBOL(xfrm_state_lookup);
2315
2316 struct xfrm_state *
2317 xfrm_state_lookup_byaddr(struct net *net, u32 mark,
2318 const xfrm_address_t *daddr, const xfrm_address_t *saddr,
2319 u8 proto, unsigned short family)
2320 {
2321 struct xfrm_hash_state_ptrs state_ptrs;
2322 struct xfrm_state *x;
2323
2324 rcu_read_lock();
2325
2326 xfrm_hash_ptrs_get(net, &state_ptrs);
2327
2328 x = __xfrm_state_lookup_byaddr(&state_ptrs, mark, daddr, saddr, proto, family);
2329 rcu_read_unlock();
2330 return x;
2331 }
2332 EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
2333
2334 struct xfrm_state *
2335 xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid,
2336 u32 if_id, u32 pcpu_num, u8 proto, const xfrm_address_t *daddr,
2337 const xfrm_address_t *saddr, int create, unsigned short family)
2338 {
2339 struct xfrm_state *x;
2340
2341 spin_lock_bh(&net->xfrm.xfrm_state_lock);
2342 x = __find_acq_core(net, mark, family, mode, reqid, if_id, pcpu_num,
2343 proto, daddr, saddr, create);
2344 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
2345
2346 return x;
2347 }
2348 EXPORT_SYMBOL(xfrm_find_acq);
2349
2350 #ifdef CONFIG_XFRM_SUB_POLICY
2351 #if IS_ENABLED(CONFIG_IPV6)
2352 /* distribution counting sort function for xfrm_state and xfrm_tmpl */
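/*
 * Classes are 1-based; count[] first holds the per-class totals, after
 * which the prefix sums give each class its starting slot in dst[].  For
 * example, input classes {4, 1, 1, 3} are placed stably as {1, 1, 3, 4}.
 */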
2353 static void
2354 __xfrm6_sort(void **dst, void **src, int n,
2355 int (*cmp)(const void *p), int maxclass)
2356 {
2357 int count[XFRM_MAX_DEPTH] = { };
2358 int class[XFRM_MAX_DEPTH];
2359 int i;
2360
2361 for (i = 0; i < n; i++) {
2362 int c = cmp(src[i]);
2363
2364 class[i] = c;
2365 count[c]++;
2366 }
2367
2368 for (i = 2; i < maxclass; i++)
2369 count[i] += count[i - 1];
2370
2371 for (i = 0; i < n; i++) {
2372 dst[count[class[i] - 1]++] = src[i];
2373 src[i] = NULL;
2374 }
2375 }
2376
2377 /* Rule for xfrm_state:
2378 *
2379 * rule 1: select IPsec transport except AH
2380 * rule 2: select MIPv6 RO or inbound trigger
2381 * rule 3: select IPsec transport AH
2382 * rule 4: select IPsec tunnel
2383 * rule 5: others
2384 */
2385 static int __xfrm6_state_sort_cmp(const void *p)
2386 {
2387 const struct xfrm_state *v = p;
2388
2389 switch (v->props.mode) {
2390 case XFRM_MODE_TRANSPORT:
2391 if (v->id.proto != IPPROTO_AH)
2392 return 1;
2393 else
2394 return 3;
2395 #if IS_ENABLED(CONFIG_IPV6_MIP6)
2396 case XFRM_MODE_ROUTEOPTIMIZATION:
2397 case XFRM_MODE_IN_TRIGGER:
2398 return 2;
2399 #endif
2400 case XFRM_MODE_TUNNEL:
2401 case XFRM_MODE_BEET:
2402 case XFRM_MODE_IPTFS:
2403 return 4;
2404 }
2405 return 5;
2406 }
2407
2408 /* Rule for xfrm_tmpl:
2409 *
2410 * rule 1: select IPsec transport
2411 * rule 2: select MIPv6 RO or inbound trigger
2412 * rule 3: select IPsec tunnel
2413 * rule 4: others
2414 */
2415 static int __xfrm6_tmpl_sort_cmp(const void *p)
2416 {
2417 const struct xfrm_tmpl *v = p;
2418
2419 switch (v->mode) {
2420 case XFRM_MODE_TRANSPORT:
2421 return 1;
2422 #if IS_ENABLED(CONFIG_IPV6_MIP6)
2423 case XFRM_MODE_ROUTEOPTIMIZATION:
2424 case XFRM_MODE_IN_TRIGGER:
2425 return 2;
2426 #endif
2427 case XFRM_MODE_TUNNEL:
2428 case XFRM_MODE_BEET:
2429 case XFRM_MODE_IPTFS:
2430 return 3;
2431 }
2432 return 4;
2433 }
2434 #else
2435 static inline int __xfrm6_state_sort_cmp(const void *p) { return 5; }
2436 static inline int __xfrm6_tmpl_sort_cmp(const void *p) { return 4; }
2437
2438 static inline void
2439 __xfrm6_sort(void **dst, void **src, int n,
2440 int (*cmp)(const void *p), int maxclass)
2441 {
2442 int i;
2443
2444 for (i = 0; i < n; i++)
2445 dst[i] = src[i];
2446 }
2447 #endif /* CONFIG_IPV6 */
2448
2449 void
2450 xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
2451 unsigned short family)
2452 {
2453 int i;
2454
2455 if (family == AF_INET6)
2456 __xfrm6_sort((void **)dst, (void **)src, n,
2457 __xfrm6_tmpl_sort_cmp, 5);
2458 else
2459 for (i = 0; i < n; i++)
2460 dst[i] = src[i];
2461 }
2462
2463 void
2464 xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
2465 unsigned short family)
2466 {
2467 int i;
2468
2469 if (family == AF_INET6)
2470 __xfrm6_sort((void **)dst, (void **)src, n,
2471 __xfrm6_state_sort_cmp, 6);
2472 else
2473 for (i = 0; i < n; i++)
2474 dst[i] = src[i];
2475 }
2476 #endif
2477
2478 /* Silly enough, but I'm too lazy to build a resolution list */
2479
2480 static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num)
2481 {
2482 unsigned int h = xfrm_seq_hash(net, seq);
2483 struct xfrm_state *x;
2484
2485 hlist_for_each_entry_rcu(x, net->xfrm.state_byseq + h, byseq) {
2486 if (x->km.seq == seq &&
2487 (mark & x->mark.m) == x->mark.v &&
2488 x->pcpu_num == pcpu_num &&
2489 x->km.state == XFRM_STATE_ACQ) {
2490 xfrm_state_hold(x);
2491 return x;
2492 }
2493 }
2494
2495 return NULL;
2496 }
2497
2498 struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num)
2499 {
2500 struct xfrm_state *x;
2501
2502 spin_lock_bh(&net->xfrm.xfrm_state_lock);
2503 x = __xfrm_find_acq_byseq(net, mark, seq, pcpu_num);
2504 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
2505 return x;
2506 }
2507 EXPORT_SYMBOL(xfrm_find_acq_byseq);
2508
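/*
 * Hand out a unique, non-zero sequence number for ACQUIRE messages; the
 * loop simply skips the value 0 when the counter wraps around.
 */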
2509 u32 xfrm_get_acqseq(void)
2510 {
2511 u32 res;
2512 static atomic_t acqseq;
2513
2514 do {
2515 res = atomic_inc_return(&acqseq);
2516 } while (!res);
2517
2518 return res;
2519 }
2520 EXPORT_SYMBOL(xfrm_get_acqseq);
2521
2522 int verify_spi_info(u8 proto, u32 min, u32 max, struct netlink_ext_ack *extack)
2523 {
2524 switch (proto) {
2525 case IPPROTO_AH:
2526 case IPPROTO_ESP:
2527 break;
2528
2529 case IPPROTO_COMP:
2530 /* The IPCOMP SPI is 16 bits wide. */
2531 if (max >= 0x10000) {
2532 NL_SET_ERR_MSG(extack, "IPCOMP SPI must be <= 65535");
2533 return -EINVAL;
2534 }
2535 break;
2536
2537 default:
2538 NL_SET_ERR_MSG(extack, "Invalid protocol, must be one of AH, ESP, IPCOMP");
2539 return -EINVAL;
2540 }
2541
2542 if (min > max) {
2543 NL_SET_ERR_MSG(extack, "Invalid SPI range: min > max");
2544 return -EINVAL;
2545 }
2546
2547 return 0;
2548 }
2549 EXPORT_SYMBOL(verify_spi_info);
2550
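/*
 * Allocate an SPI for an SA (typically a larval ACQUIRE state).  With
 * low == high the exact value is requested and checked for collisions;
 * otherwise up to high - low + 1 random probes are made within the range.
 * On success the state is linked into the byspi hash under the state lock.
 */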
2551 int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high,
2552 struct netlink_ext_ack *extack)
2553 {
2554 struct net *net = xs_net(x);
2555 unsigned int h;
2556 struct xfrm_state *x0;
2557 int err = -ENOENT;
2558 __be32 minspi = htonl(low);
2559 __be32 maxspi = htonl(high);
2560 __be32 newspi = 0;
2561 u32 mark = x->mark.v & x->mark.m;
2562
2563 spin_lock_bh(&x->lock);
2564 if (x->km.state == XFRM_STATE_DEAD) {
2565 NL_SET_ERR_MSG(extack, "Target ACQUIRE is in DEAD state");
2566 goto unlock;
2567 }
2568
2569 err = 0;
2570 if (x->id.spi)
2571 goto unlock;
2572
2573 err = -ENOENT;
2574
2575 if (minspi == maxspi) {
2576 x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family);
2577 if (x0) {
2578 NL_SET_ERR_MSG(extack, "Requested SPI is already in use");
2579 xfrm_state_put(x0);
2580 goto unlock;
2581 }
2582 newspi = minspi;
2583 } else {
2584 u32 spi = 0;
2585 for (h = 0; h < high-low+1; h++) {
2586 spi = get_random_u32_inclusive(low, high);
2587 x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
2588 if (x0 == NULL) {
2589 newspi = htonl(spi);
2590 break;
2591 }
2592 xfrm_state_put(x0);
2593 }
2594 }
2595 if (newspi) {
2596 spin_lock_bh(&net->xfrm.xfrm_state_lock);
2597 x->id.spi = newspi;
2598 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
2599 XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h,
2600 x->xso.type);
2601 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
2602
2603 err = 0;
2604 } else {
2605 NL_SET_ERR_MSG(extack, "No SPI available in the requested range");
2606 }
2607
2608 unlock:
2609 spin_unlock_bh(&x->lock);
2610
2611 return err;
2612 }
2613 EXPORT_SYMBOL(xfrm_alloc_spi);
2614
2615 static bool __xfrm_state_filter_match(struct xfrm_state *x,
2616 struct xfrm_address_filter *filter)
2617 {
2618 if (filter) {
2619 if ((filter->family == AF_INET ||
2620 filter->family == AF_INET6) &&
2621 x->props.family != filter->family)
2622 return false;
2623
2624 return addr_match(&x->props.saddr, &filter->saddr,
2625 filter->splen) &&
2626 addr_match(&x->id.daddr, &filter->daddr,
2627 filter->dplen);
2628 }
2629 return true;
2630 }
2631
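/*
 * Resumable dump of all states in a netns: the caller's xfrm_state_walk
 * acts as a cursor in net->xfrm.state_all, so a walk interrupted by a
 * non-zero return from @func can later be continued from the same
 * position.
 */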
2632 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
2633 int (*func)(struct xfrm_state *, int, void*),
2634 void *data)
2635 {
2636 struct xfrm_state *state;
2637 struct xfrm_state_walk *x;
2638 int err = 0;
2639
2640 if (walk->seq != 0 && list_empty(&walk->all))
2641 return 0;
2642
2643 spin_lock_bh(&net->xfrm.xfrm_state_lock);
2644 if (list_empty(&walk->all))
2645 x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all);
2646 else
2647 x = list_first_entry(&walk->all, struct xfrm_state_walk, all);
2648 list_for_each_entry_from(x, &net->xfrm.state_all, all) {
2649 if (x->state == XFRM_STATE_DEAD)
2650 continue;
2651 state = container_of(x, struct xfrm_state, km);
2652 if (!xfrm_id_proto_match(state->id.proto, walk->proto))
2653 continue;
2654 if (!__xfrm_state_filter_match(state, walk->filter))
2655 continue;
2656 err = func(state, walk->seq, data);
2657 if (err) {
2658 list_move_tail(&walk->all, &x->all);
2659 goto out;
2660 }
2661 walk->seq++;
2662 }
2663 if (walk->seq == 0) {
2664 err = -ENOENT;
2665 goto out;
2666 }
2667 list_del_init(&walk->all);
2668 out:
2669 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
2670 return err;
2671 }
2672 EXPORT_SYMBOL(xfrm_state_walk);
2673
2674 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
2675 struct xfrm_address_filter *filter)
2676 {
2677 INIT_LIST_HEAD(&walk->all);
2678 walk->proto = proto;
2679 walk->state = XFRM_STATE_DEAD;
2680 walk->seq = 0;
2681 walk->filter = filter;
2682 }
2683 EXPORT_SYMBOL(xfrm_state_walk_init);
2684
2685 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net)
2686 {
2687 kfree(walk->filter);
2688
2689 if (list_empty(&walk->all))
2690 return;
2691
2692 spin_lock_bh(&net->xfrm.xfrm_state_lock);
2693 list_del(&walk->all);
2694 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
2695 }
2696 EXPORT_SYMBOL(xfrm_state_walk_done);
2697
2698 static void xfrm_replay_timer_handler(struct timer_list *t)
2699 {
2700 struct xfrm_state *x = timer_container_of(x, t, rtimer);
2701
2702 spin_lock(&x->lock);
2703
2704 if (x->km.state == XFRM_STATE_VALID) {
2705 if (xfrm_aevent_is_on(xs_net(x)))
2706 xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
2707 else
2708 x->xflags |= XFRM_TIME_DEFER;
2709 }
2710
2711 spin_unlock(&x->lock);
2712 }
2713
2714 static LIST_HEAD(xfrm_km_list);
2715
2716 void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
2717 {
2718 struct xfrm_mgr *km;
2719
2720 rcu_read_lock();
2721 list_for_each_entry_rcu(km, &xfrm_km_list, list)
2722 if (km->notify_policy)
2723 km->notify_policy(xp, dir, c);
2724 rcu_read_unlock();
2725 }
2726
2727 void km_state_notify(struct xfrm_state *x, const struct km_event *c)
2728 {
2729 struct xfrm_mgr *km;
2730 rcu_read_lock();
2731 list_for_each_entry_rcu(km, &xfrm_km_list, list)
2732 if (km->notify)
2733 km->notify(x, c);
2734 rcu_read_unlock();
2735 }
2736
2737 EXPORT_SYMBOL(km_policy_notify);
2738 EXPORT_SYMBOL(km_state_notify);
2739
2740 void km_state_expired(struct xfrm_state *x, int hard, u32 portid)
2741 {
2742 struct km_event c;
2743
2744 c.data.hard = hard;
2745 c.portid = portid;
2746 c.event = XFRM_MSG_EXPIRE;
2747 km_state_notify(x, &c);
2748 }
2749
2750 EXPORT_SYMBOL(km_state_expired);
2751 /*
2752 * We send to all registered managers regardless of failure;
2753 * we are happy with one success.
2754 */
2755 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
2756 {
2757 int err = -EINVAL, acqret;
2758 struct xfrm_mgr *km;
2759
2760 rcu_read_lock();
2761 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2762 acqret = km->acquire(x, t, pol);
2763 if (!acqret)
2764 err = acqret;
2765 }
2766 rcu_read_unlock();
2767 return err;
2768 }
2769 EXPORT_SYMBOL(km_query);
2770
2771 static int __km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
2772 {
2773 int err = -EINVAL;
2774 struct xfrm_mgr *km;
2775
2776 rcu_read_lock();
2777 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2778 if (km->new_mapping)
2779 err = km->new_mapping(x, ipaddr, sport);
2780 if (!err)
2781 break;
2782 }
2783 rcu_read_unlock();
2784 return err;
2785 }
2786
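/*
 * Report a changed NAT mapping to the key managers.  When mapping_maxage
 * is set, identical notifications are rate-limited to one per
 * mapping_maxage seconds unless the source port has changed.
 */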
2787 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
2788 {
2789 int ret = 0;
2790
2791 if (x->mapping_maxage) {
2792 if ((jiffies / HZ - x->new_mapping) > x->mapping_maxage ||
2793 x->new_mapping_sport != sport) {
2794 x->new_mapping_sport = sport;
2795 x->new_mapping = jiffies / HZ;
2796 ret = __km_new_mapping(x, ipaddr, sport);
2797 }
2798 } else {
2799 ret = __km_new_mapping(x, ipaddr, sport);
2800 }
2801
2802 return ret;
2803 }
2804 EXPORT_SYMBOL(km_new_mapping);
2805
2806 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid)
2807 {
2808 struct km_event c;
2809
2810 c.data.hard = hard;
2811 c.portid = portid;
2812 c.event = XFRM_MSG_POLEXPIRE;
2813 km_policy_notify(pol, dir, &c);
2814 }
2815 EXPORT_SYMBOL(km_policy_expired);
2816
2817 #ifdef CONFIG_XFRM_MIGRATE
2818 int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2819 const struct xfrm_migrate *m, int num_migrate,
2820 const struct xfrm_kmaddress *k,
2821 const struct xfrm_encap_tmpl *encap)
2822 {
2823 int err = -EINVAL;
2824 int ret;
2825 struct xfrm_mgr *km;
2826
2827 rcu_read_lock();
2828 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2829 if (km->migrate) {
2830 ret = km->migrate(sel, dir, type, m, num_migrate, k,
2831 encap);
2832 if (!ret)
2833 err = ret;
2834 }
2835 }
2836 rcu_read_unlock();
2837 return err;
2838 }
2839 EXPORT_SYMBOL(km_migrate);
2840 #endif
2841
2842 int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
2843 {
2844 int err = -EINVAL;
2845 int ret;
2846 struct xfrm_mgr *km;
2847
2848 rcu_read_lock();
2849 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2850 if (km->report) {
2851 ret = km->report(net, proto, sel, addr);
2852 if (!ret)
2853 err = ret;
2854 }
2855 }
2856 rcu_read_unlock();
2857 return err;
2858 }
2859 EXPORT_SYMBOL(km_report);
2860
2861 static bool km_is_alive(const struct km_event *c)
2862 {
2863 struct xfrm_mgr *km;
2864 bool is_alive = false;
2865
2866 rcu_read_lock();
2867 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2868 if (km->is_alive && km->is_alive(c)) {
2869 is_alive = true;
2870 break;
2871 }
2872 }
2873 rcu_read_unlock();
2874
2875 return is_alive;
2876 }
2877
2878 #if IS_ENABLED(CONFIG_XFRM_USER_COMPAT)
2879 static DEFINE_SPINLOCK(xfrm_translator_lock);
2880 static struct xfrm_translator __rcu *xfrm_translator;
2881
2882 struct xfrm_translator *xfrm_get_translator(void)
2883 {
2884 struct xfrm_translator *xtr;
2885
2886 rcu_read_lock();
2887 xtr = rcu_dereference(xfrm_translator);
2888 if (unlikely(!xtr))
2889 goto out;
2890 if (!try_module_get(xtr->owner))
2891 xtr = NULL;
2892 out:
2893 rcu_read_unlock();
2894 return xtr;
2895 }
2896 EXPORT_SYMBOL_GPL(xfrm_get_translator);
2897
2898 void xfrm_put_translator(struct xfrm_translator *xtr)
2899 {
2900 module_put(xtr->owner);
2901 }
2902 EXPORT_SYMBOL_GPL(xfrm_put_translator);
2903
2904 int xfrm_register_translator(struct xfrm_translator *xtr)
2905 {
2906 int err = 0;
2907
2908 spin_lock_bh(&xfrm_translator_lock);
2909 if (unlikely(xfrm_translator != NULL))
2910 err = -EEXIST;
2911 else
2912 rcu_assign_pointer(xfrm_translator, xtr);
2913 spin_unlock_bh(&xfrm_translator_lock);
2914
2915 return err;
2916 }
2917 EXPORT_SYMBOL_GPL(xfrm_register_translator);
2918
2919 int xfrm_unregister_translator(struct xfrm_translator *xtr)
2920 {
2921 int err = 0;
2922
2923 spin_lock_bh(&xfrm_translator_lock);
2924 if (likely(xfrm_translator != NULL)) {
2925 if (rcu_access_pointer(xfrm_translator) != xtr)
2926 err = -EINVAL;
2927 else
2928 RCU_INIT_POINTER(xfrm_translator, NULL);
2929 }
2930 spin_unlock_bh(&xfrm_translator_lock);
2931 synchronize_rcu();
2932
2933 return err;
2934 }
2935 EXPORT_SYMBOL_GPL(xfrm_unregister_translator);
2936 #endif
2937
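/*
 * setsockopt() entry point for per-socket policies (IP_XFRM_POLICY and its
 * IPv6 counterpart).  An empty argument clears both directions; otherwise
 * the blob is translated for compat callers if needed and handed to the
 * registered key managers to be compiled into an xfrm_policy.
 */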
2938 int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval, int optlen)
2939 {
2940 int err;
2941 u8 *data;
2942 struct xfrm_mgr *km;
2943 struct xfrm_policy *pol = NULL;
2944
2945 if (sockptr_is_null(optval) && !optlen) {
2946 xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
2947 xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
2948 __sk_dst_reset(sk);
2949 return 0;
2950 }
2951
2952 if (optlen <= 0 || optlen > PAGE_SIZE)
2953 return -EMSGSIZE;
2954
2955 data = memdup_sockptr(optval, optlen);
2956 if (IS_ERR(data))
2957 return PTR_ERR(data);
2958
2959 if (in_compat_syscall()) {
2960 struct xfrm_translator *xtr = xfrm_get_translator();
2961
2962 if (!xtr) {
2963 kfree(data);
2964 return -EOPNOTSUPP;
2965 }
2966
2967 err = xtr->xlate_user_policy_sockptr(&data, optlen);
2968 xfrm_put_translator(xtr);
2969 if (err) {
2970 kfree(data);
2971 return err;
2972 }
2973 }
2974
2975 err = -EINVAL;
2976 rcu_read_lock();
2977 list_for_each_entry_rcu(km, &xfrm_km_list, list) {
2978 pol = km->compile_policy(sk, optname, data,
2979 optlen, &err);
2980 if (err >= 0)
2981 break;
2982 }
2983 rcu_read_unlock();
2984
2985 if (err >= 0) {
2986 xfrm_sk_policy_insert(sk, err, pol);
2987 xfrm_pol_put(pol);
2988 __sk_dst_reset(sk);
2989 err = 0;
2990 }
2991
2992 kfree(data);
2993 return err;
2994 }
2995 EXPORT_SYMBOL(xfrm_user_policy);
2996
2997 static DEFINE_SPINLOCK(xfrm_km_lock);
2998
2999 void xfrm_register_km(struct xfrm_mgr *km)
3000 {
3001 spin_lock_bh(&xfrm_km_lock);
3002 list_add_tail_rcu(&km->list, &xfrm_km_list);
3003 spin_unlock_bh(&xfrm_km_lock);
3004 }
3005 EXPORT_SYMBOL(xfrm_register_km);
3006
3007 void xfrm_unregister_km(struct xfrm_mgr *km)
3008 {
3009 spin_lock_bh(&xfrm_km_lock);
3010 list_del_rcu(&km->list);
3011 spin_unlock_bh(&xfrm_km_lock);
3012 synchronize_rcu();
3013 }
3014 EXPORT_SYMBOL(xfrm_unregister_km);
3015
3016 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
3017 {
3018 int err = 0;
3019
3020 if (WARN_ON(afinfo->family >= NPROTO))
3021 return -EAFNOSUPPORT;
3022
3023 spin_lock_bh(&xfrm_state_afinfo_lock);
3024 if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
3025 err = -EEXIST;
3026 else
3027 rcu_assign_pointer(xfrm_state_afinfo[afinfo->family], afinfo);
3028 spin_unlock_bh(&xfrm_state_afinfo_lock);
3029 return err;
3030 }
3031 EXPORT_SYMBOL(xfrm_state_register_afinfo);
3032
3033 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
3034 {
3035 int err = 0, family = afinfo->family;
3036
3037 if (WARN_ON(family >= NPROTO))
3038 return -EAFNOSUPPORT;
3039
3040 spin_lock_bh(&xfrm_state_afinfo_lock);
3041 if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
3042 if (rcu_access_pointer(xfrm_state_afinfo[family]) != afinfo)
3043 err = -EINVAL;
3044 else
3045 RCU_INIT_POINTER(xfrm_state_afinfo[afinfo->family], NULL);
3046 }
3047 spin_unlock_bh(&xfrm_state_afinfo_lock);
3048 synchronize_rcu();
3049 return err;
3050 }
3051 EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
3052
3053 struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family)
3054 {
3055 if (unlikely(family >= NPROTO))
3056 return NULL;
3057
3058 return rcu_dereference(xfrm_state_afinfo[family]);
3059 }
3060 EXPORT_SYMBOL_GPL(xfrm_state_afinfo_get_rcu);
3061
3062 struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
3063 {
3064 struct xfrm_state_afinfo *afinfo;
3065 if (unlikely(family >= NPROTO))
3066 return NULL;
3067 rcu_read_lock();
3068 afinfo = rcu_dereference(xfrm_state_afinfo[family]);
3069 if (unlikely(!afinfo))
3070 rcu_read_unlock();
3071 return afinfo;
3072 }
3073
3074 void xfrm_flush_gc(void)
3075 {
3076 flush_work(&xfrm_state_gc_work);
3077 }
3078 EXPORT_SYMBOL(xfrm_flush_gc);
3079
3080 /* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
3081 void xfrm_state_delete_tunnel(struct xfrm_state *x)
3082 {
3083 if (x->tunnel) {
3084 struct xfrm_state *t = x->tunnel;
3085
3086 if (atomic_read(&t->tunnel_users) == 2)
3087 xfrm_state_delete(t);
3088 atomic_dec(&t->tunnel_users);
3089 xfrm_state_put_sync(t);
3090 x->tunnel = NULL;
3091 }
3092 }
3093 EXPORT_SYMBOL(xfrm_state_delete_tunnel);
3094
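/*
 * Compute the largest inner payload that still fits in @mtu for an ESP
 * state: subtract header and ICV overhead, round the payload down to the
 * cipher block size and reserve two bytes for the pad-length and
 * next-header trailer fields.
 */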
3095 u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
3096 {
3097 const struct xfrm_type *type = READ_ONCE(x->type);
3098 struct crypto_aead *aead;
3099 u32 blksize, net_adj = 0;
3100
3101 if (x->km.state != XFRM_STATE_VALID ||
3102 !type || type->proto != IPPROTO_ESP)
3103 return mtu - x->props.header_len;
3104
3105 aead = x->data;
3106 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
3107
3108 switch (x->props.mode) {
3109 case XFRM_MODE_TRANSPORT:
3110 case XFRM_MODE_BEET:
3111 if (x->props.family == AF_INET)
3112 net_adj = sizeof(struct iphdr);
3113 else if (x->props.family == AF_INET6)
3114 net_adj = sizeof(struct ipv6hdr);
3115 break;
3116 case XFRM_MODE_TUNNEL:
3117 break;
3118 default:
3119 if (x->mode_cbs && x->mode_cbs->get_inner_mtu)
3120 return x->mode_cbs->get_inner_mtu(x, mtu);
3121
3122 WARN_ON_ONCE(1);
3123 break;
3124 }
3125
3126 return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
3127 net_adj) & ~(blksize - 1)) + net_adj - 2;
3128 }
3129 EXPORT_SYMBOL_GPL(xfrm_state_mtu);
3130
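/*
 * Resolve the per-family pieces of a new state: inner and outer xfrm_mode,
 * the xfrm_type for the protocol and the optional mode callbacks, plus
 * sanity checks for NAT keepalive (outbound, ESP-in-UDP only).
 */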
3131 int __xfrm_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
3132 {
3133 const struct xfrm_mode *inner_mode;
3134 const struct xfrm_mode *outer_mode;
3135 int family = x->props.family;
3136 int err;
3137
3138 if (family == AF_INET &&
3139 READ_ONCE(xs_net(x)->ipv4.sysctl_ip_no_pmtu_disc))
3140 x->props.flags |= XFRM_STATE_NOPMTUDISC;
3141
3142 err = -EPROTONOSUPPORT;
3143
3144 if (x->sel.family != AF_UNSPEC) {
3145 inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
3146 if (inner_mode == NULL) {
3147 NL_SET_ERR_MSG(extack, "Requested mode not found");
3148 goto error;
3149 }
3150
3151 if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
3152 family != x->sel.family) {
3153 NL_SET_ERR_MSG(extack, "Only tunnel modes can accommodate a change of family");
3154 goto error;
3155 }
3156
3157 x->inner_mode = *inner_mode;
3158 } else {
3159 const struct xfrm_mode *inner_mode_iaf;
3160 int iafamily = AF_INET;
3161
3162 inner_mode = xfrm_get_mode(x->props.mode, x->props.family);
3163 if (inner_mode == NULL) {
3164 NL_SET_ERR_MSG(extack, "Requested mode not found");
3165 goto error;
3166 }
3167
3168 x->inner_mode = *inner_mode;
3169
3170 if (x->props.family == AF_INET)
3171 iafamily = AF_INET6;
3172
3173 inner_mode_iaf = xfrm_get_mode(x->props.mode, iafamily);
3174 if (inner_mode_iaf) {
3175 if (inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)
3176 x->inner_mode_iaf = *inner_mode_iaf;
3177 }
3178 }
3179
3180 x->type = xfrm_get_type(x->id.proto, family);
3181 if (x->type == NULL) {
3182 NL_SET_ERR_MSG(extack, "Requested type not found");
3183 goto error;
3184 }
3185
3186 err = x->type->init_state(x, extack);
3187 if (err)
3188 goto error;
3189
3190 outer_mode = xfrm_get_mode(x->props.mode, family);
3191 if (!outer_mode) {
3192 NL_SET_ERR_MSG(extack, "Requested mode not found");
3193 err = -EPROTONOSUPPORT;
3194 goto error;
3195 }
3196
3197 x->outer_mode = *outer_mode;
3198 if (x->nat_keepalive_interval) {
3199 if (x->dir != XFRM_SA_DIR_OUT) {
3200 NL_SET_ERR_MSG(extack, "NAT keepalive is only supported for outbound SAs");
3201 err = -EINVAL;
3202 goto error;
3203 }
3204
3205 if (!x->encap || x->encap->encap_type != UDP_ENCAP_ESPINUDP) {
3206 NL_SET_ERR_MSG(extack,
3207 "NAT keepalive is only supported for UDP encapsulation");
3208 err = -EINVAL;
3209 goto error;
3210 }
3211 }
3212
3213 x->mode_cbs = xfrm_get_mode_cbs(x->props.mode);
3214 if (x->mode_cbs) {
3215 if (x->mode_cbs->init_state)
3216 err = x->mode_cbs->init_state(x);
3217 module_put(x->mode_cbs->owner);
3218 }
3219 error:
3220 return err;
3221 }
3222
3223 EXPORT_SYMBOL(__xfrm_init_state);
3224
3225 int xfrm_init_state(struct xfrm_state *x)
3226 {
3227 int err;
3228
3229 err = __xfrm_init_state(x, NULL);
3230 if (err)
3231 return err;
3232
3233 err = xfrm_init_replay(x, NULL);
3234 if (err)
3235 return err;
3236
3237 x->km.state = XFRM_STATE_VALID;
3238 return 0;
3239 }
3240
3241 EXPORT_SYMBOL(xfrm_init_state);
3242
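/*
 * Per-netns setup: the bydst/bysrc/byspi/byseq hash tables start with
 * eight buckets each and are grown later by the resize worker as the
 * number of states increases.
 */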
3243 int __net_init xfrm_state_init(struct net *net)
3244 {
3245 unsigned int sz;
3246
3247 if (net_eq(net, &init_net))
3248 xfrm_state_cache = KMEM_CACHE(xfrm_state,
3249 SLAB_HWCACHE_ALIGN | SLAB_PANIC);
3250
3251 INIT_LIST_HEAD(&net->xfrm.state_all);
3252
3253 sz = sizeof(struct hlist_head) * 8;
3254
3255 net->xfrm.state_bydst = xfrm_hash_alloc(sz);
3256 if (!net->xfrm.state_bydst)
3257 goto out_bydst;
3258 net->xfrm.state_bysrc = xfrm_hash_alloc(sz);
3259 if (!net->xfrm.state_bysrc)
3260 goto out_bysrc;
3261 net->xfrm.state_byspi = xfrm_hash_alloc(sz);
3262 if (!net->xfrm.state_byspi)
3263 goto out_byspi;
3264 net->xfrm.state_byseq = xfrm_hash_alloc(sz);
3265 if (!net->xfrm.state_byseq)
3266 goto out_byseq;
3267
3268 net->xfrm.state_cache_input = alloc_percpu(struct hlist_head);
3269 if (!net->xfrm.state_cache_input)
3270 goto out_state_cache_input;
3271
3272 net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1);
3273
3274 net->xfrm.state_num = 0;
3275 INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
3276 spin_lock_init(&net->xfrm.xfrm_state_lock);
3277 seqcount_spinlock_init(&net->xfrm.xfrm_state_hash_generation,
3278 &net->xfrm.xfrm_state_lock);
3279 return 0;
3280
3281 out_state_cache_input:
3282 xfrm_hash_free(net->xfrm.state_byseq, sz);
3283 out_byseq:
3284 xfrm_hash_free(net->xfrm.state_byspi, sz);
3285 out_byspi:
3286 xfrm_hash_free(net->xfrm.state_bysrc, sz);
3287 out_bysrc:
3288 xfrm_hash_free(net->xfrm.state_bydst, sz);
3289 out_bydst:
3290 return -ENOMEM;
3291 }
3292
3293 void xfrm_state_fini(struct net *net)
3294 {
3295 unsigned int sz;
3296
3297 flush_work(&net->xfrm.state_hash_work);
3298 flush_work(&xfrm_state_gc_work);
3299 xfrm_state_flush(net, 0, false, true);
3300
3301 WARN_ON(!list_empty(&net->xfrm.state_all));
3302
3303 sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head);
3304 WARN_ON(!hlist_empty(net->xfrm.state_byseq));
3305 xfrm_hash_free(net->xfrm.state_byseq, sz);
3306 WARN_ON(!hlist_empty(net->xfrm.state_byspi));
3307 xfrm_hash_free(net->xfrm.state_byspi, sz);
3308 WARN_ON(!hlist_empty(net->xfrm.state_bysrc));
3309 xfrm_hash_free(net->xfrm.state_bysrc, sz);
3310 WARN_ON(!hlist_empty(net->xfrm.state_bydst));
3311 xfrm_hash_free(net->xfrm.state_bydst, sz);
3312 free_percpu(net->xfrm.state_cache_input);
3313 }
3314
3315 #ifdef CONFIG_AUDITSYSCALL
3316 static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
3317 struct audit_buffer *audit_buf)
3318 {
3319 struct xfrm_sec_ctx *ctx = x->security;
3320 u32 spi = ntohl(x->id.spi);
3321
3322 if (ctx)
3323 audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
3324 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);
3325
3326 switch (x->props.family) {
3327 case AF_INET:
3328 audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
3329 &x->props.saddr.a4, &x->id.daddr.a4);
3330 break;
3331 case AF_INET6:
3332 audit_log_format(audit_buf, " src=%pI6 dst=%pI6",
3333 x->props.saddr.a6, x->id.daddr.a6);
3334 break;
3335 }
3336
3337 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
3338 }
3339
3340 static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
3341 struct audit_buffer *audit_buf)
3342 {
3343 const struct iphdr *iph4;
3344 const struct ipv6hdr *iph6;
3345
3346 switch (family) {
3347 case AF_INET:
3348 iph4 = ip_hdr(skb);
3349 audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
3350 &iph4->saddr, &iph4->daddr);
3351 break;
3352 case AF_INET6:
3353 iph6 = ipv6_hdr(skb);
3354 audit_log_format(audit_buf,
3355 " src=%pI6 dst=%pI6 flowlbl=0x%x%02x%02x",
3356 &iph6->saddr, &iph6->daddr,
3357 iph6->flow_lbl[0] & 0x0f,
3358 iph6->flow_lbl[1],
3359 iph6->flow_lbl[2]);
3360 break;
3361 }
3362 }
3363
3364 void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid)
3365 {
3366 struct audit_buffer *audit_buf;
3367
3368 audit_buf = xfrm_audit_start("SAD-add");
3369 if (audit_buf == NULL)
3370 return;
3371 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
3372 xfrm_audit_helper_sainfo(x, audit_buf);
3373 audit_log_format(audit_buf, " res=%u", result);
3374 audit_log_end(audit_buf);
3375 }
3376 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
3377
3378 void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid)
3379 {
3380 struct audit_buffer *audit_buf;
3381
3382 audit_buf = xfrm_audit_start("SAD-delete");
3383 if (audit_buf == NULL)
3384 return;
3385 xfrm_audit_helper_usrinfo(task_valid, audit_buf);
3386 xfrm_audit_helper_sainfo(x, audit_buf);
3387 audit_log_format(audit_buf, " res=%u", result);
3388 audit_log_end(audit_buf);
3389 }
3390 EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);
3391
3392 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
3393 struct sk_buff *skb)
3394 {
3395 struct audit_buffer *audit_buf;
3396 u32 spi;
3397
3398 audit_buf = xfrm_audit_start("SA-replay-overflow");
3399 if (audit_buf == NULL)
3400 return;
3401 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
3402 /* don't record the sequence number because it's inherent in this kind
3403 * of audit message */
3404 spi = ntohl(x->id.spi);
3405 audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
3406 audit_log_end(audit_buf);
3407 }
3408 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);
3409
3410 void xfrm_audit_state_replay(struct xfrm_state *x,
3411 struct sk_buff *skb, __be32 net_seq)
3412 {
3413 struct audit_buffer *audit_buf;
3414 u32 spi;
3415
3416 audit_buf = xfrm_audit_start("SA-replayed-pkt");
3417 if (audit_buf == NULL)
3418 return;
3419 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
3420 spi = ntohl(x->id.spi);
3421 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
3422 spi, spi, ntohl(net_seq));
3423 audit_log_end(audit_buf);
3424 }
3425 EXPORT_SYMBOL_GPL(xfrm_audit_state_replay);
3426
3427 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
3428 {
3429 struct audit_buffer *audit_buf;
3430
3431 audit_buf = xfrm_audit_start("SA-notfound");
3432 if (audit_buf == NULL)
3433 return;
3434 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
3435 audit_log_end(audit_buf);
3436 }
3437 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);
3438
3439 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
3440 __be32 net_spi, __be32 net_seq)
3441 {
3442 struct audit_buffer *audit_buf;
3443 u32 spi;
3444
3445 audit_buf = xfrm_audit_start("SA-notfound");
3446 if (audit_buf == NULL)
3447 return;
3448 xfrm_audit_helper_pktinfo(skb, family, audit_buf);
3449 spi = ntohl(net_spi);
3450 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
3451 spi, spi, ntohl(net_seq));
3452 audit_log_end(audit_buf);
3453 }
3454 EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);
3455
3456 void xfrm_audit_state_icvfail(struct xfrm_state *x,
3457 struct sk_buff *skb, u8 proto)
3458 {
3459 struct audit_buffer *audit_buf;
3460 __be32 net_spi;
3461 __be32 net_seq;
3462
3463 audit_buf = xfrm_audit_start("SA-icv-failure");
3464 if (audit_buf == NULL)
3465 return;
3466 xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
3467 if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
3468 u32 spi = ntohl(net_spi);
3469 audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
3470 spi, spi, ntohl(net_seq));
3471 }
3472 audit_log_end(audit_buf);
3473 }
3474 EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
3475 #endif /* CONFIG_AUDITSYSCALL */
3476