// SPDX-License-Identifier: GPL-2.0-only
/*
 * xfrm_state.c
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *	YOSHIFUJI Hideaki @USAGI
 *		Split up af-specific functions
 *	Derek Atkins <derek@ihtfp.com>
 *		Add UDP Encapsulation
 *
 */

#include <linux/compat.h>
#include <linux/workqueue.h>
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <linux/uaccess.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>

#include <crypto/aead.h>

#include "xfrm_hash.h"

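/* Dereference helpers for the state hash tables: readers either hold
 * rcu_read_lock() or net->xfrm.xfrm_state_lock, and lockdep is told so.
 */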
#define xfrm_state_deref_prot(table, net) \
	rcu_dereference_protected((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))
#define xfrm_state_deref_check(table, net) \
	rcu_dereference_check((table), lockdep_is_held(&(net)->xfrm.xfrm_state_lock))

static void xfrm_state_gc_task(struct work_struct *work);

/* Each xfrm_state may be linked to two tables:

   1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl)
   2. Hash table by (daddr,family,reqid) to find what SAs exist for given
      destination/tunnel endpoint. (output)
 */

static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
static struct kmem_cache *xfrm_state_cache __ro_after_init;

static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task);
static HLIST_HEAD(xfrm_state_gc_list);
static HLIST_HEAD(xfrm_state_dev_gc_list);

static inline bool xfrm_state_hold_rcu(struct xfrm_state __rcu *x)
{
	return refcount_inc_not_zero(&x->refcnt);
}

static inline unsigned int xfrm_dst_hash(struct net *net,
					 const xfrm_address_t *daddr,
					 const xfrm_address_t *saddr,
					 u32 reqid,
					 unsigned short family)
{
	lockdep_assert_held(&net->xfrm.xfrm_state_lock);

	return __xfrm_dst_hash(daddr, saddr, reqid, family, net->xfrm.state_hmask);
}

static inline unsigned int xfrm_src_hash(struct net *net,
					 const xfrm_address_t *daddr,
					 const xfrm_address_t *saddr,
					 unsigned short family)
{
	lockdep_assert_held(&net->xfrm.xfrm_state_lock);

	return __xfrm_src_hash(daddr, saddr, family, net->xfrm.state_hmask);
}

static inline unsigned int
xfrm_spi_hash(struct net *net, const xfrm_address_t *daddr,
	      __be32 spi, u8 proto, unsigned short family)
{
	lockdep_assert_held(&net->xfrm.xfrm_state_lock);

	return __xfrm_spi_hash(daddr, spi, proto, family, net->xfrm.state_hmask);
}

static unsigned int xfrm_seq_hash(struct net *net, u32 seq)
{
	lockdep_assert_held(&net->xfrm.xfrm_state_lock);

	return __xfrm_seq_hash(seq, net->xfrm.state_hmask);
}

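/* Insert a state into a hash chain while keeping the chain ordered:
 * packet-offload (HW) states stay at the head, software states follow.
 * Lookups rely on this ordering to stop walking early on HW entries.
 */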
#define XFRM_STATE_INSERT(by, _n, _h, _type)				\
	{								\
		struct xfrm_state *_x = NULL;				\
									\
		if (_type != XFRM_DEV_OFFLOAD_PACKET) {			\
			hlist_for_each_entry_rcu(_x, _h, by) {		\
				if (_x->xso.type == XFRM_DEV_OFFLOAD_PACKET) \
					continue;			\
				break;					\
			}						\
		}							\
									\
		if (!_x || _x->xso.type == XFRM_DEV_OFFLOAD_PACKET)	\
			/* SAD is empty or consists of HW SAs only */	\
			hlist_add_head_rcu(_n, _h);			\
		else							\
			hlist_add_before_rcu(_n, &_x->by);		\
	}

static void xfrm_hash_transfer(struct hlist_head *list,
			       struct hlist_head *ndsttable,
			       struct hlist_head *nsrctable,
			       struct hlist_head *nspitable,
			       struct hlist_head *nseqtable,
			       unsigned int nhashmask)
{
	struct hlist_node *tmp;
	struct xfrm_state *x;

	hlist_for_each_entry_safe(x, tmp, list, bydst) {
		unsigned int h;

		h = __xfrm_dst_hash(&x->id.daddr, &x->props.saddr,
				    x->props.reqid, x->props.family,
				    nhashmask);
		XFRM_STATE_INSERT(bydst, &x->bydst, ndsttable + h, x->xso.type);

		h = __xfrm_src_hash(&x->id.daddr, &x->props.saddr,
				    x->props.family,
				    nhashmask);
		XFRM_STATE_INSERT(bysrc, &x->bysrc, nsrctable + h, x->xso.type);

		if (x->id.spi) {
			h = __xfrm_spi_hash(&x->id.daddr, x->id.spi,
					    x->id.proto, x->props.family,
					    nhashmask);
			XFRM_STATE_INSERT(byspi, &x->byspi, nspitable + h,
					  x->xso.type);
		}

		if (x->km.seq) {
			h = __xfrm_seq_hash(x->km.seq, nhashmask);
			XFRM_STATE_INSERT(byseq, &x->byseq, nseqtable + h,
					  x->xso.type);
		}
	}
}

static unsigned long xfrm_hash_new_size(unsigned int state_hmask)
{
	return ((state_hmask + 1) << 1) * sizeof(struct hlist_head);
}

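/* Grow all four state hash tables (bydst, bysrc, byspi, byseq) to twice
 * the current size: allocate the new tables, rehash every state under the
 * state lock inside a seqcount write section, publish the new pointers
 * with rcu_assign_pointer(), and free the old tables after a grace period.
 */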
static void xfrm_hash_resize(struct work_struct *work)
{
	struct net *net = container_of(work, struct net, xfrm.state_hash_work);
	struct hlist_head *ndst, *nsrc, *nspi, *nseq, *odst, *osrc, *ospi, *oseq;
	unsigned long nsize, osize;
	unsigned int nhashmask, ohashmask;
	int i;

	nsize = xfrm_hash_new_size(net->xfrm.state_hmask);
	ndst = xfrm_hash_alloc(nsize);
	if (!ndst)
		return;
	nsrc = xfrm_hash_alloc(nsize);
	if (!nsrc) {
		xfrm_hash_free(ndst, nsize);
		return;
	}
	nspi = xfrm_hash_alloc(nsize);
	if (!nspi) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		return;
	}
	nseq = xfrm_hash_alloc(nsize);
	if (!nseq) {
		xfrm_hash_free(ndst, nsize);
		xfrm_hash_free(nsrc, nsize);
		xfrm_hash_free(nspi, nsize);
		return;
	}

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	write_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);

	nhashmask = (nsize / sizeof(struct hlist_head)) - 1U;
	odst = xfrm_state_deref_prot(net->xfrm.state_bydst, net);
	for (i = net->xfrm.state_hmask; i >= 0; i--)
		xfrm_hash_transfer(odst + i, ndst, nsrc, nspi, nseq, nhashmask);

	osrc = xfrm_state_deref_prot(net->xfrm.state_bysrc, net);
	ospi = xfrm_state_deref_prot(net->xfrm.state_byspi, net);
	oseq = xfrm_state_deref_prot(net->xfrm.state_byseq, net);
	ohashmask = net->xfrm.state_hmask;

	rcu_assign_pointer(net->xfrm.state_bydst, ndst);
	rcu_assign_pointer(net->xfrm.state_bysrc, nsrc);
	rcu_assign_pointer(net->xfrm.state_byspi, nspi);
	rcu_assign_pointer(net->xfrm.state_byseq, nseq);
	net->xfrm.state_hmask = nhashmask;

	write_seqcount_end(&net->xfrm.xfrm_state_hash_generation);
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	osize = (ohashmask + 1) * sizeof(struct hlist_head);

	synchronize_rcu();

	xfrm_hash_free(odst, osize);
	xfrm_hash_free(osrc, osize);
	xfrm_hash_free(ospi, osize);
	xfrm_hash_free(oseq, osize);
}

static DEFINE_SPINLOCK(xfrm_state_afinfo_lock);
static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO];

static DEFINE_SPINLOCK(xfrm_state_gc_lock);
static DEFINE_SPINLOCK(xfrm_state_dev_gc_lock);

int __xfrm_state_delete(struct xfrm_state *x);

int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
static bool km_is_alive(const struct km_event *c);
void km_state_expired(struct xfrm_state *x, int hard, u32 portid);

int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	int err = 0;

	if (!afinfo)
		return -EAFNOSUPPORT;

#define X(afi, T, name) do {			\
		WARN_ON((afi)->type_ ## name);	\
		(afi)->type_ ## name = (T);	\
	} while (0)

	switch (type->proto) {
	case IPPROTO_COMP:
		X(afinfo, type, comp);
		break;
	case IPPROTO_AH:
		X(afinfo, type, ah);
		break;
	case IPPROTO_ESP:
		X(afinfo, type, esp);
		break;
	case IPPROTO_IPIP:
		X(afinfo, type, ipip);
		break;
	case IPPROTO_DSTOPTS:
		X(afinfo, type, dstopts);
		break;
	case IPPROTO_ROUTING:
		X(afinfo, type, routing);
		break;
	case IPPROTO_IPV6:
		X(afinfo, type, ipip6);
		break;
	default:
		WARN_ON(1);
		err = -EPROTONOSUPPORT;
		break;
	}
#undef X
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(xfrm_register_type);

void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return;

#define X(afi, T, name) do {				\
		WARN_ON((afi)->type_ ## name != (T));	\
		(afi)->type_ ## name = NULL;		\
	} while (0)

	switch (type->proto) {
	case IPPROTO_COMP:
		X(afinfo, type, comp);
		break;
	case IPPROTO_AH:
		X(afinfo, type, ah);
		break;
	case IPPROTO_ESP:
		X(afinfo, type, esp);
		break;
	case IPPROTO_IPIP:
		X(afinfo, type, ipip);
		break;
	case IPPROTO_DSTOPTS:
		X(afinfo, type, dstopts);
		break;
	case IPPROTO_ROUTING:
		X(afinfo, type, routing);
		break;
	case IPPROTO_IPV6:
		X(afinfo, type, ipip6);
		break;
	default:
		WARN_ON(1);
		break;
	}
#undef X
	rcu_read_unlock();
}
EXPORT_SYMBOL(xfrm_unregister_type);

static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
{
	const struct xfrm_type *type = NULL;
	struct xfrm_state_afinfo *afinfo;
	int modload_attempted = 0;

retry:
	afinfo = xfrm_state_get_afinfo(family);
	if (unlikely(afinfo == NULL))
		return NULL;

	switch (proto) {
	case IPPROTO_COMP:
		type = afinfo->type_comp;
		break;
	case IPPROTO_AH:
		type = afinfo->type_ah;
		break;
	case IPPROTO_ESP:
		type = afinfo->type_esp;
		break;
	case IPPROTO_IPIP:
		type = afinfo->type_ipip;
		break;
	case IPPROTO_DSTOPTS:
		type = afinfo->type_dstopts;
		break;
	case IPPROTO_ROUTING:
		type = afinfo->type_routing;
		break;
	case IPPROTO_IPV6:
		type = afinfo->type_ipip6;
		break;
	default:
		break;
	}

	if (unlikely(type && !try_module_get(type->owner)))
		type = NULL;

	rcu_read_unlock();

	if (!type && !modload_attempted) {
		request_module("xfrm-type-%d-%d", family, proto);
		modload_attempted = 1;
		goto retry;
	}

	return type;
}

static void xfrm_put_type(const struct xfrm_type *type)
{
	module_put(type->owner);
}

int xfrm_register_type_offload(const struct xfrm_type_offload *type,
			       unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
	int err = 0;

	if (unlikely(afinfo == NULL))
		return -EAFNOSUPPORT;

	switch (type->proto) {
	case IPPROTO_ESP:
		WARN_ON(afinfo->type_offload_esp);
		afinfo->type_offload_esp = type;
		break;
	default:
		WARN_ON(1);
		err = -EPROTONOSUPPORT;
		break;
	}

	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(xfrm_register_type_offload);

void xfrm_unregister_type_offload(const struct xfrm_type_offload *type,
				  unsigned short family)
{
	struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);

	if (unlikely(afinfo == NULL))
		return;

	switch (type->proto) {
	case IPPROTO_ESP:
		WARN_ON(afinfo->type_offload_esp != type);
		afinfo->type_offload_esp = NULL;
		break;
	default:
		WARN_ON(1);
		break;
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xfrm_unregister_type_offload);

void xfrm_set_type_offload(struct xfrm_state *x)
{
	const struct xfrm_type_offload *type = NULL;
	struct xfrm_state_afinfo *afinfo;
	bool try_load = true;

retry:
	afinfo = xfrm_state_get_afinfo(x->props.family);
	if (unlikely(afinfo == NULL))
		goto out;

	switch (x->id.proto) {
	case IPPROTO_ESP:
		type = afinfo->type_offload_esp;
		break;
	default:
		break;
	}

	if (type && !try_module_get(type->owner))
		type = NULL;

	rcu_read_unlock();

	if (!type && try_load) {
		request_module("xfrm-offload-%d-%d", x->props.family,
			       x->id.proto);
		try_load = false;
		goto retry;
	}

out:
	x->type_offload = type;
}
EXPORT_SYMBOL(xfrm_set_type_offload);

static const struct xfrm_mode xfrm4_mode_map[XFRM_MODE_MAX] = {
	[XFRM_MODE_BEET] = {
		.encap = XFRM_MODE_BEET,
		.flags = XFRM_MODE_FLAG_TUNNEL,
		.family = AF_INET,
	},
	[XFRM_MODE_TRANSPORT] = {
		.encap = XFRM_MODE_TRANSPORT,
		.family = AF_INET,
	},
	[XFRM_MODE_TUNNEL] = {
		.encap = XFRM_MODE_TUNNEL,
		.flags = XFRM_MODE_FLAG_TUNNEL,
		.family = AF_INET,
	},
	[XFRM_MODE_IPTFS] = {
		.encap = XFRM_MODE_IPTFS,
		.flags = XFRM_MODE_FLAG_TUNNEL,
		.family = AF_INET,
	},
};

static const struct xfrm_mode xfrm6_mode_map[XFRM_MODE_MAX] = {
	[XFRM_MODE_BEET] = {
		.encap = XFRM_MODE_BEET,
		.flags = XFRM_MODE_FLAG_TUNNEL,
		.family = AF_INET6,
	},
	[XFRM_MODE_ROUTEOPTIMIZATION] = {
		.encap = XFRM_MODE_ROUTEOPTIMIZATION,
		.family = AF_INET6,
	},
	[XFRM_MODE_TRANSPORT] = {
		.encap = XFRM_MODE_TRANSPORT,
		.family = AF_INET6,
	},
	[XFRM_MODE_TUNNEL] = {
		.encap = XFRM_MODE_TUNNEL,
		.flags = XFRM_MODE_FLAG_TUNNEL,
		.family = AF_INET6,
	},
	[XFRM_MODE_IPTFS] = {
		.encap = XFRM_MODE_IPTFS,
		.flags = XFRM_MODE_FLAG_TUNNEL,
		.family = AF_INET6,
	},
};

static const struct xfrm_mode *xfrm_get_mode(unsigned int encap, int family)
{
	const struct xfrm_mode *mode;

	if (unlikely(encap >= XFRM_MODE_MAX))
		return NULL;

	switch (family) {
	case AF_INET:
		mode = &xfrm4_mode_map[encap];
		if (mode->family == family)
			return mode;
		break;
	case AF_INET6:
		mode = &xfrm6_mode_map[encap];
		if (mode->family == family)
			return mode;
		break;
	default:
		break;
	}

	return NULL;
}

static const struct xfrm_mode_cbs __rcu *xfrm_mode_cbs_map[XFRM_MODE_MAX];
static DEFINE_SPINLOCK(xfrm_mode_cbs_map_lock);

int xfrm_register_mode_cbs(u8 mode, const struct xfrm_mode_cbs *mode_cbs)
{
	if (mode >= XFRM_MODE_MAX)
		return -EINVAL;

	spin_lock_bh(&xfrm_mode_cbs_map_lock);
	rcu_assign_pointer(xfrm_mode_cbs_map[mode], mode_cbs);
	spin_unlock_bh(&xfrm_mode_cbs_map_lock);

	return 0;
}
EXPORT_SYMBOL(xfrm_register_mode_cbs);

void xfrm_unregister_mode_cbs(u8 mode)
{
	if (mode >= XFRM_MODE_MAX)
		return;

	spin_lock_bh(&xfrm_mode_cbs_map_lock);
	RCU_INIT_POINTER(xfrm_mode_cbs_map[mode], NULL);
	spin_unlock_bh(&xfrm_mode_cbs_map_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL(xfrm_unregister_mode_cbs);

static const struct xfrm_mode_cbs *xfrm_get_mode_cbs(u8 mode)
{
	const struct xfrm_mode_cbs *cbs;
	bool try_load = true;

	if (mode >= XFRM_MODE_MAX)
		return NULL;

retry:
	rcu_read_lock();

	cbs = rcu_dereference(xfrm_mode_cbs_map[mode]);
	if (cbs && !try_module_get(cbs->owner))
		cbs = NULL;

	rcu_read_unlock();

	if (mode == XFRM_MODE_IPTFS && !cbs && try_load) {
		request_module("xfrm-iptfs");
		try_load = false;
		goto retry;
	}

	return cbs;
}

void xfrm_state_free(struct xfrm_state *x)
{
	kmem_cache_free(xfrm_state_cache, x);
}
EXPORT_SYMBOL(xfrm_state_free);

static void ___xfrm_state_destroy(struct xfrm_state *x)
{
	if (x->mode_cbs && x->mode_cbs->destroy_state)
		x->mode_cbs->destroy_state(x);
	hrtimer_cancel(&x->mtimer);
	del_timer_sync(&x->rtimer);
	kfree(x->aead);
	kfree(x->aalg);
	kfree(x->ealg);
	kfree(x->calg);
	kfree(x->encap);
	kfree(x->coaddr);
	kfree(x->replay_esn);
	kfree(x->preplay_esn);
	if (x->type) {
		x->type->destructor(x);
		xfrm_put_type(x->type);
	}
	if (x->xfrag.page)
		put_page(x->xfrag.page);
	xfrm_dev_state_free(x);
	security_xfrm_state_free(x);
	xfrm_state_free(x);
}

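/* Destroy states queued on xfrm_state_gc_list. The synchronize_rcu()
 * guarantees no RCU reader still traverses a state by the time it is freed.
 */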
static void xfrm_state_gc_task(struct work_struct *work)
{
	struct xfrm_state *x;
	struct hlist_node *tmp;
	struct hlist_head gc_list;

	spin_lock_bh(&xfrm_state_gc_lock);
	hlist_move_list(&xfrm_state_gc_list, &gc_list);
	spin_unlock_bh(&xfrm_state_gc_lock);

	synchronize_rcu();

	hlist_for_each_entry_safe(x, tmp, &gc_list, gclist)
		___xfrm_state_destroy(x);
}

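/* Per-state lifetime timer: checks soft/hard add and use expirations,
 * notifies the key managers via km_state_expired(), deletes the state on
 * hard expiry, and re-arms itself for the earliest remaining deadline.
 */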
static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
{
	struct xfrm_state *x = container_of(me, struct xfrm_state, mtimer);
	enum hrtimer_restart ret = HRTIMER_NORESTART;
	time64_t now = ktime_get_real_seconds();
	time64_t next = TIME64_MAX;
	int warn = 0;
	int err = 0;

	spin_lock(&x->lock);
	xfrm_dev_state_update_stats(x);

	if (x->km.state == XFRM_STATE_DEAD)
		goto out;
	if (x->km.state == XFRM_STATE_EXPIRED)
		goto expired;
	if (x->lft.hard_add_expires_seconds) {
		time64_t tmo = x->lft.hard_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0) {
			if (x->xflags & XFRM_SOFT_EXPIRE) {
				/* hard expire reached without a prior soft
				 * expire?! Setting a new expiry date can
				 * trigger this. Workaround: fix up
				 * x->curlft.add_time as below:
				 */
				x->curlft.add_time = now - x->saved_tmo - 1;
				tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
			} else
				goto expired;
		}
		if (tmo < next)
			next = tmo;
	}
	if (x->lft.hard_use_expires_seconds) {
		time64_t tmo = x->lft.hard_use_expires_seconds +
			(READ_ONCE(x->curlft.use_time) ? : now) - now;
		if (tmo <= 0)
			goto expired;
		if (tmo < next)
			next = tmo;
	}
	if (x->km.dying)
		goto resched;
	if (x->lft.soft_add_expires_seconds) {
		time64_t tmo = x->lft.soft_add_expires_seconds +
			x->curlft.add_time - now;
		if (tmo <= 0) {
			warn = 1;
			x->xflags &= ~XFRM_SOFT_EXPIRE;
		} else if (tmo < next) {
			next = tmo;
			x->xflags |= XFRM_SOFT_EXPIRE;
			x->saved_tmo = tmo;
		}
	}
	if (x->lft.soft_use_expires_seconds) {
		time64_t tmo = x->lft.soft_use_expires_seconds +
			(READ_ONCE(x->curlft.use_time) ? : now) - now;
		if (tmo <= 0)
			warn = 1;
		else if (tmo < next)
			next = tmo;
	}

	x->km.dying = warn;
	if (warn)
		km_state_expired(x, 0, 0);
resched:
	if (next != TIME64_MAX) {
		hrtimer_forward_now(&x->mtimer, ktime_set(next, 0));
		ret = HRTIMER_RESTART;
	}

	goto out;

expired:
	if (x->km.state == XFRM_STATE_ACQ && x->id.spi == 0)
		x->km.state = XFRM_STATE_EXPIRED;

	err = __xfrm_state_delete(x);
	if (!err)
		km_state_expired(x, 1, 0);

	xfrm_audit_state_delete(x, err ? 0 : 1, true);

out:
	spin_unlock(&x->lock);
	return ret;
}

static void xfrm_replay_timer_handler(struct timer_list *t);

struct xfrm_state *xfrm_state_alloc(struct net *net)
{
	struct xfrm_state *x;

	x = kmem_cache_zalloc(xfrm_state_cache, GFP_ATOMIC);

	if (x) {
		write_pnet(&x->xs_net, net);
		refcount_set(&x->refcnt, 1);
		atomic_set(&x->tunnel_users, 0);
		INIT_LIST_HEAD(&x->km.all);
		INIT_HLIST_NODE(&x->state_cache);
		INIT_HLIST_NODE(&x->bydst);
		INIT_HLIST_NODE(&x->bysrc);
		INIT_HLIST_NODE(&x->byspi);
		INIT_HLIST_NODE(&x->byseq);
		hrtimer_setup(&x->mtimer, xfrm_timer_handler, CLOCK_BOOTTIME,
			      HRTIMER_MODE_ABS_SOFT);
		timer_setup(&x->rtimer, xfrm_replay_timer_handler, 0);
		x->curlft.add_time = ktime_get_real_seconds();
		x->lft.soft_byte_limit = XFRM_INF;
		x->lft.soft_packet_limit = XFRM_INF;
		x->lft.hard_byte_limit = XFRM_INF;
		x->lft.hard_packet_limit = XFRM_INF;
		x->replay_maxage = 0;
		x->replay_maxdiff = 0;
		x->pcpu_num = UINT_MAX;
		spin_lock_init(&x->lock);
		x->mode_data = NULL;
	}
	return x;
}
EXPORT_SYMBOL(xfrm_state_alloc);

#ifdef CONFIG_XFRM_OFFLOAD
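/* Tell the driver to remove the offloaded state, then park it on the
 * device GC list so xfrm_dev_state_free() can release the HW resources
 * and the netdev reference later.
 */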
void xfrm_dev_state_delete(struct xfrm_state *x)
{
	struct xfrm_dev_offload *xso = &x->xso;
	struct net_device *dev = READ_ONCE(xso->dev);

	if (dev) {
		dev->xfrmdev_ops->xdo_dev_state_delete(x);
		spin_lock_bh(&xfrm_state_dev_gc_lock);
		hlist_add_head(&x->dev_gclist, &xfrm_state_dev_gc_list);
		spin_unlock_bh(&xfrm_state_dev_gc_lock);
	}
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_delete);

void xfrm_dev_state_free(struct xfrm_state *x)
{
	struct xfrm_dev_offload *xso = &x->xso;
	struct net_device *dev = READ_ONCE(xso->dev);

	xfrm_unset_type_offload(x);

	if (dev && dev->xfrmdev_ops) {
		spin_lock_bh(&xfrm_state_dev_gc_lock);
		if (!hlist_unhashed(&x->dev_gclist))
			hlist_del(&x->dev_gclist);
		spin_unlock_bh(&xfrm_state_dev_gc_lock);

		if (dev->xfrmdev_ops->xdo_dev_state_free)
			dev->xfrmdev_ops->xdo_dev_state_free(x);
		WRITE_ONCE(xso->dev, NULL);
		xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
		netdev_put(dev, &xso->dev_tracker);
	}
}
#endif

void __xfrm_state_destroy(struct xfrm_state *x, bool sync)
{
	WARN_ON(x->km.state != XFRM_STATE_DEAD);

	if (sync) {
		synchronize_rcu();
		___xfrm_state_destroy(x);
	} else {
		spin_lock_bh(&xfrm_state_gc_lock);
		hlist_add_head(&x->gclist, &xfrm_state_gc_list);
		spin_unlock_bh(&xfrm_state_gc_lock);
		schedule_work(&xfrm_state_gc_work);
	}
}
EXPORT_SYMBOL(__xfrm_state_destroy);

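/* Unlink a state from all hash tables and lists and drop the reference
 * taken at creation time. Called with x->lock held; returns 0 if the
 * state was alive, -ESRCH if it was already dead.
 */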
int __xfrm_state_delete(struct xfrm_state *x)
{
	struct net *net = xs_net(x);
	int err = -ESRCH;

	if (x->km.state != XFRM_STATE_DEAD) {
		x->km.state = XFRM_STATE_DEAD;

		spin_lock(&net->xfrm.xfrm_state_lock);
		list_del(&x->km.all);
		hlist_del_rcu(&x->bydst);
		hlist_del_rcu(&x->bysrc);
		if (x->km.seq)
			hlist_del_rcu(&x->byseq);
		if (!hlist_unhashed(&x->state_cache))
			hlist_del_rcu(&x->state_cache);
		if (!hlist_unhashed(&x->state_cache_input))
			hlist_del_rcu(&x->state_cache_input);

		if (x->id.spi)
			hlist_del_rcu(&x->byspi);
		net->xfrm.state_num--;
		xfrm_nat_keepalive_state_updated(x);
		spin_unlock(&net->xfrm.xfrm_state_lock);

		if (x->encap_sk)
			sock_put(rcu_dereference_raw(x->encap_sk));

		xfrm_dev_state_delete(x);

		/* All xfrm_state objects are created by xfrm_state_alloc.
		 * The xfrm_state_alloc call gives a reference, and that
		 * is what we are dropping here.
		 */
		xfrm_state_put(x);
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL(__xfrm_state_delete);

int xfrm_state_delete(struct xfrm_state *x)
{
	int err;

	spin_lock_bh(&x->lock);
	err = __xfrm_state_delete(x);
	spin_unlock_bh(&x->lock);

	return err;
}
EXPORT_SYMBOL(xfrm_state_delete);

#ifdef CONFIG_SECURITY_NETWORK_XFRM
static inline int
xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
{
	int i, err = 0;

	for (i = 0; i <= net->xfrm.state_hmask; i++) {
		struct xfrm_state *x;

		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
			if (xfrm_id_proto_match(x->id.proto, proto) &&
			   (err = security_xfrm_state_delete(x)) != 0) {
				xfrm_audit_state_delete(x, 0, task_valid);
				return err;
			}
		}
	}

	return err;
}

static inline int
xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
{
	int i, err = 0;

	for (i = 0; i <= net->xfrm.state_hmask; i++) {
		struct xfrm_state *x;
		struct xfrm_dev_offload *xso;

		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
			xso = &x->xso;

			if (xso->dev == dev &&
			   (err = security_xfrm_state_delete(x)) != 0) {
				xfrm_audit_state_delete(x, 0, task_valid);
				return err;
			}
		}
	}

	return err;
}
#else
static inline int
xfrm_state_flush_secctx_check(struct net *net, u8 proto, bool task_valid)
{
	return 0;
}

static inline int
xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool task_valid)
{
	return 0;
}
#endif

int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync)
{
	int i, err = 0, cnt = 0;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	err = xfrm_state_flush_secctx_check(net, proto, task_valid);
	if (err)
		goto out;

	err = -ESRCH;
	for (i = 0; i <= net->xfrm.state_hmask; i++) {
		struct xfrm_state *x;
restart:
		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
			if (!xfrm_state_kern(x) &&
			    xfrm_id_proto_match(x->id.proto, proto)) {
				xfrm_state_hold(x);
				spin_unlock_bh(&net->xfrm.xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_audit_state_delete(x, err ? 0 : 1,
							task_valid);
				if (sync)
					xfrm_state_put_sync(x);
				else
					xfrm_state_put(x);
				if (!err)
					cnt++;

				spin_lock_bh(&net->xfrm.xfrm_state_lock);
				goto restart;
			}
		}
	}
out:
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	if (cnt)
		err = 0;

	return err;
}
EXPORT_SYMBOL(xfrm_state_flush);

int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid)
{
	struct xfrm_state *x;
	struct hlist_node *tmp;
	struct xfrm_dev_offload *xso;
	int i, err = 0, cnt = 0;

	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	err = xfrm_dev_state_flush_secctx_check(net, dev, task_valid);
	if (err)
		goto out;

	err = -ESRCH;
	for (i = 0; i <= net->xfrm.state_hmask; i++) {
restart:
		hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) {
			xso = &x->xso;

			if (!xfrm_state_kern(x) && xso->dev == dev) {
				xfrm_state_hold(x);
				spin_unlock_bh(&net->xfrm.xfrm_state_lock);

				err = xfrm_state_delete(x);
				xfrm_dev_state_free(x);

				xfrm_audit_state_delete(x, err ? 0 : 1,
							task_valid);
				xfrm_state_put(x);
				if (!err)
					cnt++;

				spin_lock_bh(&net->xfrm.xfrm_state_lock);
				goto restart;
			}
		}
	}
	if (cnt)
		err = 0;

out:
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);

	spin_lock_bh(&xfrm_state_dev_gc_lock);
restart_gc:
	hlist_for_each_entry_safe(x, tmp, &xfrm_state_dev_gc_list, dev_gclist) {
		xso = &x->xso;

		if (xso->dev == dev) {
			spin_unlock_bh(&xfrm_state_dev_gc_lock);
			xfrm_dev_state_free(x);
			spin_lock_bh(&xfrm_state_dev_gc_lock);
			goto restart_gc;
		}
	}
	spin_unlock_bh(&xfrm_state_dev_gc_lock);

	xfrm_flush_gc();

	return err;
}
EXPORT_SYMBOL(xfrm_dev_state_flush);

void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
{
	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	si->sadcnt = net->xfrm.state_num;
	si->sadhcnt = net->xfrm.state_hmask + 1;
	si->sadhmcnt = xfrm_state_hashmax;
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_sad_getinfo);

static void
__xfrm4_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi4 *fl4 = &fl->u.ip4;

	sel->daddr.a4 = fl4->daddr;
	sel->saddr.a4 = fl4->saddr;
	sel->dport = xfrm_flowi_dport(fl, &fl4->uli);
	sel->dport_mask = htons(0xffff);
	sel->sport = xfrm_flowi_sport(fl, &fl4->uli);
	sel->sport_mask = htons(0xffff);
	sel->family = AF_INET;
	sel->prefixlen_d = 32;
	sel->prefixlen_s = 32;
	sel->proto = fl4->flowi4_proto;
	sel->ifindex = fl4->flowi4_oif;
}

static void
__xfrm6_init_tempsel(struct xfrm_selector *sel, const struct flowi *fl)
{
	const struct flowi6 *fl6 = &fl->u.ip6;

	/* Initialize temporary selector matching only to current session. */
	*(struct in6_addr *)&sel->daddr = fl6->daddr;
	*(struct in6_addr *)&sel->saddr = fl6->saddr;
	sel->dport = xfrm_flowi_dport(fl, &fl6->uli);
	sel->dport_mask = htons(0xffff);
	sel->sport = xfrm_flowi_sport(fl, &fl6->uli);
	sel->sport_mask = htons(0xffff);
	sel->family = AF_INET6;
	sel->prefixlen_d = 128;
	sel->prefixlen_s = 128;
	sel->proto = fl6->flowi6_proto;
	sel->ifindex = fl6->flowi6_oif;
}

static void
xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
		    const struct xfrm_tmpl *tmpl,
		    const xfrm_address_t *daddr, const xfrm_address_t *saddr,
		    unsigned short family)
{
	switch (family) {
	case AF_INET:
		__xfrm4_init_tempsel(&x->sel, fl);
		break;
	case AF_INET6:
		__xfrm6_init_tempsel(&x->sel, fl);
		break;
	}

	x->id = tmpl->id;

	switch (tmpl->encap_family) {
	case AF_INET:
		if (x->id.daddr.a4 == 0)
			x->id.daddr.a4 = daddr->a4;
		x->props.saddr = tmpl->saddr;
		if (x->props.saddr.a4 == 0)
			x->props.saddr.a4 = saddr->a4;
		break;
	case AF_INET6:
		if (ipv6_addr_any((struct in6_addr *)&x->id.daddr))
			memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr));
		memcpy(&x->props.saddr, &tmpl->saddr, sizeof(x->props.saddr));
		if (ipv6_addr_any((struct in6_addr *)&x->props.saddr))
			memcpy(&x->props.saddr, saddr, sizeof(x->props.saddr));
		break;
	}

	x->props.mode = tmpl->mode;
	x->props.reqid = tmpl->reqid;
	x->props.family = tmpl->encap_family;
}

struct xfrm_hash_state_ptrs {
	const struct hlist_head *bydst;
	const struct hlist_head *bysrc;
	const struct hlist_head *byspi;
	unsigned int hmask;
};

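/* Snapshot the hash table pointers and mask consistently: retry if a
 * concurrent resize bumped the hash generation seqcount meanwhile.
 */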
static void xfrm_hash_ptrs_get(const struct net *net, struct xfrm_hash_state_ptrs *ptrs)
{
	unsigned int sequence;

	do {
		sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);

		ptrs->bydst = xfrm_state_deref_check(net->xfrm.state_bydst, net);
		ptrs->bysrc = xfrm_state_deref_check(net->xfrm.state_bysrc, net);
		ptrs->byspi = xfrm_state_deref_check(net->xfrm.state_byspi, net);
		ptrs->hmask = net->xfrm.state_hmask;
	} while (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence));
}

static struct xfrm_state *__xfrm_state_lookup_all(const struct xfrm_hash_state_ptrs *state_ptrs,
						  u32 mark,
						  const xfrm_address_t *daddr,
						  __be32 spi, u8 proto,
						  unsigned short family,
						  struct xfrm_dev_offload *xdo)
{
	unsigned int h = __xfrm_spi_hash(daddr, spi, proto, family, state_ptrs->hmask);
	struct xfrm_state *x;

	hlist_for_each_entry_rcu(x, state_ptrs->byspi + h, byspi) {
#ifdef CONFIG_XFRM_OFFLOAD
		if (xdo->type == XFRM_DEV_OFFLOAD_PACKET) {
			if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
				/* HW states sit at the head of the list;
				 * no need to iterate further.
				 */
				break;

			/* Packet offload: policy and SA should have the
			 * same device.
			 */
			if (xdo->dev != x->xso.dev)
				continue;
		} else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
			/* Skip HW policy for SW lookups */
			continue;
#endif
		if (x->props.family != family ||
		    x->id.spi != spi ||
		    x->id.proto != proto ||
		    !xfrm_addr_equal(&x->id.daddr, daddr, family))
			continue;

		if ((mark & x->mark.m) != x->mark.v)
			continue;
		if (!xfrm_state_hold_rcu(x))
			continue;
		return x;
	}

	return NULL;
}

static struct xfrm_state *__xfrm_state_lookup(const struct xfrm_hash_state_ptrs *state_ptrs,
					      u32 mark,
					      const xfrm_address_t *daddr,
					      __be32 spi, u8 proto,
					      unsigned short family)
{
	unsigned int h = __xfrm_spi_hash(daddr, spi, proto, family, state_ptrs->hmask);
	struct xfrm_state *x;

	hlist_for_each_entry_rcu(x, state_ptrs->byspi + h, byspi) {
		if (x->props.family != family ||
		    x->id.spi != spi ||
		    x->id.proto != proto ||
		    !xfrm_addr_equal(&x->id.daddr, daddr, family))
			continue;

		if ((mark & x->mark.m) != x->mark.v)
			continue;
		if (!xfrm_state_hold_rcu(x))
			continue;
		return x;
	}

	return NULL;
}

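/* Input fast path: look the SA up in the per-cpu input state cache first
 * and fall back to the byspi hash table. A valid state found via the hash
 * is promoted into (or moved back to the head of) the cache.
 */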
struct xfrm_state *xfrm_input_state_lookup(struct net *net, u32 mark,
					   const xfrm_address_t *daddr,
					   __be32 spi, u8 proto,
					   unsigned short family)
{
	struct xfrm_hash_state_ptrs state_ptrs;
	struct hlist_head *state_cache_input;
	struct xfrm_state *x = NULL;

	state_cache_input = raw_cpu_ptr(net->xfrm.state_cache_input);

	rcu_read_lock();
	hlist_for_each_entry_rcu(x, state_cache_input, state_cache_input) {
		if (x->props.family != family ||
		    x->id.spi != spi ||
		    x->id.proto != proto ||
		    !xfrm_addr_equal(&x->id.daddr, daddr, family))
			continue;

		if ((mark & x->mark.m) != x->mark.v)
			continue;
		if (!xfrm_state_hold_rcu(x))
			continue;
		goto out;
	}

	xfrm_hash_ptrs_get(net, &state_ptrs);

	x = __xfrm_state_lookup(&state_ptrs, mark, daddr, spi, proto, family);

	if (x && x->km.state == XFRM_STATE_VALID) {
		spin_lock_bh(&net->xfrm.xfrm_state_lock);
		if (hlist_unhashed(&x->state_cache_input)) {
			hlist_add_head_rcu(&x->state_cache_input, state_cache_input);
		} else {
			hlist_del_rcu(&x->state_cache_input);
			hlist_add_head_rcu(&x->state_cache_input, state_cache_input);
		}
		spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	}

out:
	rcu_read_unlock();
	return x;
}
EXPORT_SYMBOL(xfrm_input_state_lookup);

static struct xfrm_state *__xfrm_state_lookup_byaddr(const struct xfrm_hash_state_ptrs *state_ptrs,
						     u32 mark,
						     const xfrm_address_t *daddr,
						     const xfrm_address_t *saddr,
						     u8 proto, unsigned short family)
{
	unsigned int h = __xfrm_src_hash(daddr, saddr, family, state_ptrs->hmask);
	struct xfrm_state *x;

	hlist_for_each_entry_rcu(x, state_ptrs->bysrc + h, bysrc) {
		if (x->props.family != family ||
		    x->id.proto != proto ||
		    !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
		    !xfrm_addr_equal(&x->props.saddr, saddr, family))
			continue;

		if ((mark & x->mark.m) != x->mark.v)
			continue;
		if (!xfrm_state_hold_rcu(x))
			continue;
		return x;
	}

	return NULL;
}

static inline struct xfrm_state *
__xfrm_state_locate(struct xfrm_state *x, int use_spi, int family)
{
	struct xfrm_hash_state_ptrs state_ptrs;
	struct net *net = xs_net(x);
	u32 mark = x->mark.v & x->mark.m;

	xfrm_hash_ptrs_get(net, &state_ptrs);

	if (use_spi)
		return __xfrm_state_lookup(&state_ptrs, mark, &x->id.daddr,
					   x->id.spi, x->id.proto, family);
	else
		return __xfrm_state_lookup_byaddr(&state_ptrs, mark,
						  &x->id.daddr,
						  &x->props.saddr,
						  x->id.proto, family);
}

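/* Schedule a hash table resize once chains start colliding and the number
 * of states exceeds the current table size.
 */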
static void xfrm_hash_grow_check(struct net *net, int have_hash_collision)
{
	if (have_hash_collision &&
	    (net->xfrm.state_hmask + 1) < xfrm_state_hashmax &&
	    net->xfrm.state_num > net->xfrm.state_hmask)
		schedule_work(&net->xfrm.state_hash_work);
}

static void xfrm_state_look_at(struct xfrm_policy *pol, struct xfrm_state *x,
			       const struct flowi *fl, unsigned short family,
			       struct xfrm_state **best, int *acq_in_progress,
			       int *error)
{
	/* We need the cpu id just as a lookup key;
	 * we don't require it to be stable.
	 */
	unsigned int pcpu_id = get_cpu();
	put_cpu();

	/* Resolution logic:
	 * 1. There is a valid state with matching selector. Done.
	 * 2. Valid state with inappropriate selector. Skip.
	 *
	 * Entering area of "sysdeps".
	 *
	 * 3. If state is not valid, selector is temporary, it selects
	 *    only session which triggered previous resolution. Key
	 *    manager will do something to install a state with proper
	 *    selector.
	 */
	if (x->km.state == XFRM_STATE_VALID) {
		if ((x->sel.family &&
		     (x->sel.family != family ||
		      !xfrm_selector_match(&x->sel, fl, family))) ||
		    !security_xfrm_state_pol_flow_match(x, pol,
							&fl->u.__fl_common))
			return;

		if (x->pcpu_num != UINT_MAX && x->pcpu_num != pcpu_id)
			return;

		if (!*best ||
		    ((*best)->pcpu_num == UINT_MAX && x->pcpu_num == pcpu_id) ||
		    (*best)->km.dying > x->km.dying ||
		    ((*best)->km.dying == x->km.dying &&
		     (*best)->curlft.add_time < x->curlft.add_time))
			*best = x;
	} else if (x->km.state == XFRM_STATE_ACQ) {
		if (!*best || x->pcpu_num == pcpu_id)
			*acq_in_progress = 1;
	} else if (x->km.state == XFRM_STATE_ERROR ||
		   x->km.state == XFRM_STATE_EXPIRED) {
		if ((!x->sel.family ||
		     (x->sel.family == family &&
		      xfrm_selector_match(&x->sel, fl, family))) &&
		    security_xfrm_state_pol_flow_match(x, pol,
						       &fl->u.__fl_common))
			*error = -ESRCH;
	}
}

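/* Resolve an SA for an outbound flow: scan the policy's state cache,
 * then the bydst hash, then the same hash with a wildcard source. If
 * nothing matches and no acquire is in flight, allocate a temporary
 * XFRM_STATE_ACQ entry and ask the key managers to negotiate a real SA.
 */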
struct xfrm_state *
xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
		const struct flowi *fl, struct xfrm_tmpl *tmpl,
		struct xfrm_policy *pol, int *err,
		unsigned short family, u32 if_id)
{
	static xfrm_address_t saddr_wildcard = { };
	struct xfrm_hash_state_ptrs state_ptrs;
	struct net *net = xp_net(pol);
	unsigned int h, h_wildcard;
	struct xfrm_state *x, *x0, *to_put;
	int acquire_in_progress = 0;
	int error = 0;
	struct xfrm_state *best = NULL;
	u32 mark = pol->mark.v & pol->mark.m;
	unsigned short encap_family = tmpl->encap_family;
	unsigned int sequence;
	struct km_event c;
	unsigned int pcpu_id;
	bool cached = false;

	/* We need the cpu id just as a lookup key;
	 * we don't require it to be stable.
	 */
	pcpu_id = get_cpu();
	put_cpu();

	to_put = NULL;

	sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation);

	rcu_read_lock();
	hlist_for_each_entry_rcu(x, &pol->state_cache_list, state_cache) {
		if (x->props.family == encap_family &&
		    x->props.reqid == tmpl->reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    x->if_id == if_id &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
			xfrm_state_look_at(pol, x, fl, encap_family,
					   &best, &acquire_in_progress, &error);
	}

	if (best)
		goto cached;

	hlist_for_each_entry_rcu(x, &pol->state_cache_list, state_cache) {
		if (x->props.family == encap_family &&
		    x->props.reqid == tmpl->reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    x->if_id == if_id &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
			xfrm_state_look_at(pol, x, fl, family,
					   &best, &acquire_in_progress, &error);
	}

cached:
	cached = true;
	if (best)
		goto found;
	else if (error)
		best = NULL;
	else if (acquire_in_progress) /* XXX: acquire_in_progress should not happen */
		WARN_ON(1);

	xfrm_hash_ptrs_get(net, &state_ptrs);

	h = __xfrm_dst_hash(daddr, saddr, tmpl->reqid, encap_family, state_ptrs.hmask);
	hlist_for_each_entry_rcu(x, state_ptrs.bydst + h, bydst) {
#ifdef CONFIG_XFRM_OFFLOAD
		if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
			if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
				/* HW states sit at the head of the list;
				 * no need to iterate further.
				 */
				break;

			/* Packet offload: policy and SA should have the
			 * same device.
			 */
			if (pol->xdo.dev != x->xso.dev)
				continue;
		} else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
			/* Skip HW policy for SW lookups */
			continue;
#endif
		if (x->props.family == encap_family &&
		    x->props.reqid == tmpl->reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    x->if_id == if_id &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_state_addr_check(x, daddr, saddr, encap_family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
			xfrm_state_look_at(pol, x, fl, family,
					   &best, &acquire_in_progress, &error);
	}
	if (best || acquire_in_progress)
		goto found;

	h_wildcard = __xfrm_dst_hash(daddr, &saddr_wildcard, tmpl->reqid,
				     encap_family, state_ptrs.hmask);
	hlist_for_each_entry_rcu(x, state_ptrs.bydst + h_wildcard, bydst) {
#ifdef CONFIG_XFRM_OFFLOAD
		if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
			if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
				/* HW states sit at the head of the list;
				 * no need to iterate further.
				 */
				break;

			/* Packet offload: policy and SA should have the
			 * same device.
			 */
			if (pol->xdo.dev != x->xso.dev)
				continue;
		} else if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
			/* Skip HW policy for SW lookups */
			continue;
#endif
		if (x->props.family == encap_family &&
		    x->props.reqid == tmpl->reqid &&
		    (mark & x->mark.m) == x->mark.v &&
		    x->if_id == if_id &&
		    !(x->props.flags & XFRM_STATE_WILDRECV) &&
		    xfrm_addr_equal(&x->id.daddr, daddr, encap_family) &&
		    tmpl->mode == x->props.mode &&
		    tmpl->id.proto == x->id.proto &&
		    (tmpl->id.spi == x->id.spi || !tmpl->id.spi))
			xfrm_state_look_at(pol, x, fl, family,
					   &best, &acquire_in_progress, &error);
	}

found:
	if (!(pol->flags & XFRM_POLICY_CPU_ACQUIRE) ||
	    (best && (best->pcpu_num == pcpu_id)))
		x = best;

	if (!x && !error && !acquire_in_progress) {
		if (tmpl->id.spi &&
		    (x0 = __xfrm_state_lookup_all(&state_ptrs, mark, daddr,
						  tmpl->id.spi, tmpl->id.proto,
						  encap_family,
						  &pol->xdo)) != NULL) {
			to_put = x0;
			error = -EEXIST;
			goto out;
		}

		c.net = net;
		/* If the KMs have no listeners (yet...), avoid allocating an SA
		 * for each and every packet - garbage collection might not
		 * handle the flood.
		 */
		if (!km_is_alive(&c)) {
			error = -ESRCH;
			goto out;
		}

		x = xfrm_state_alloc(net);
		if (x == NULL) {
			error = -ENOMEM;
			goto out;
		}
		/* Initialize temporary state matching only
		 * to current session. */
		xfrm_init_tempstate(x, fl, tmpl, daddr, saddr, family);
		memcpy(&x->mark, &pol->mark, sizeof(x->mark));
		x->if_id = if_id;
		if ((pol->flags & XFRM_POLICY_CPU_ACQUIRE) && best)
			x->pcpu_num = pcpu_id;

		error = security_xfrm_state_alloc_acquire(x, pol->security, fl->flowi_secid);
		if (error) {
			x->km.state = XFRM_STATE_DEAD;
			to_put = x;
			x = NULL;
			goto out;
		}
#ifdef CONFIG_XFRM_OFFLOAD
		if (pol->xdo.type == XFRM_DEV_OFFLOAD_PACKET) {
			struct xfrm_dev_offload *xdo = &pol->xdo;
			struct xfrm_dev_offload *xso = &x->xso;

			xso->type = XFRM_DEV_OFFLOAD_PACKET;
			xso->dir = xdo->dir;
			xso->dev = xdo->dev;
			xso->real_dev = xdo->real_dev;
			xso->flags = XFRM_DEV_OFFLOAD_FLAG_ACQ;
			netdev_hold(xso->dev, &xso->dev_tracker, GFP_ATOMIC);
			error = xso->dev->xfrmdev_ops->xdo_dev_state_add(x, NULL);
			if (error) {
				xso->dir = 0;
				netdev_put(xso->dev, &xso->dev_tracker);
				xso->dev = NULL;
				xso->real_dev = NULL;
				xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
				x->km.state = XFRM_STATE_DEAD;
				to_put = x;
				x = NULL;
				goto out;
			}
		}
#endif
		if (km_query(x, tmpl, pol) == 0) {
			spin_lock_bh(&net->xfrm.xfrm_state_lock);
			x->km.state = XFRM_STATE_ACQ;
			x->dir = XFRM_SA_DIR_OUT;
			list_add(&x->km.all, &net->xfrm.state_all);
			h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family);
			XFRM_STATE_INSERT(bydst, &x->bydst,
					  net->xfrm.state_bydst + h,
					  x->xso.type);
			h = xfrm_src_hash(net, daddr, saddr, encap_family);
			XFRM_STATE_INSERT(bysrc, &x->bysrc,
					  net->xfrm.state_bysrc + h,
					  x->xso.type);
			INIT_HLIST_NODE(&x->state_cache);
			if (x->id.spi) {
				h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family);
				XFRM_STATE_INSERT(byspi, &x->byspi,
						  net->xfrm.state_byspi + h,
						  x->xso.type);
			}
			if (x->km.seq) {
				h = xfrm_seq_hash(net, x->km.seq);
				XFRM_STATE_INSERT(byseq, &x->byseq,
						  net->xfrm.state_byseq + h,
						  x->xso.type);
			}
			x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
			hrtimer_start(&x->mtimer,
				      ktime_set(net->xfrm.sysctl_acq_expires, 0),
				      HRTIMER_MODE_REL_SOFT);
			net->xfrm.state_num++;
			xfrm_hash_grow_check(net, x->bydst.next != NULL);
			spin_unlock_bh(&net->xfrm.xfrm_state_lock);
		} else {
#ifdef CONFIG_XFRM_OFFLOAD
			struct xfrm_dev_offload *xso = &x->xso;

			if (xso->type == XFRM_DEV_OFFLOAD_PACKET) {
				xfrm_dev_state_delete(x);
				xfrm_dev_state_free(x);
			}
#endif
			x->km.state = XFRM_STATE_DEAD;
			to_put = x;
			x = NULL;
			error = -ESRCH;
		}

		/* Use the already installed 'fallback' while the CPU-specific
		 * SA acquire is handled. */
1625 if (best)
1626 x = best;
1627 }
1628 out:
1629 if (x) {
1630 if (!xfrm_state_hold_rcu(x)) {
1631 *err = -EAGAIN;
1632 x = NULL;
1633 }
1634 } else {
1635 *err = acquire_in_progress ? -EAGAIN : error;
1636 }
1637
1638 if (x && x->km.state == XFRM_STATE_VALID && !cached &&
1639 (!(pol->flags & XFRM_POLICY_CPU_ACQUIRE) || x->pcpu_num == pcpu_id)) {
1640 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1641 if (hlist_unhashed(&x->state_cache))
1642 hlist_add_head_rcu(&x->state_cache, &pol->state_cache_list);
1643 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1644 }
1645
1646 rcu_read_unlock();
1647 if (to_put)
1648 xfrm_state_put(to_put);
1649
1650 if (read_seqcount_retry(&net->xfrm.xfrm_state_hash_generation, sequence)) {
1651 *err = -EAGAIN;
1652 if (x) {
1653 xfrm_state_put(x);
1654 x = NULL;
1655 }
1656 }
1657
1658 return x;
1659 }
1660
1661 struct xfrm_state *
xfrm_stateonly_find(struct net * net,u32 mark,u32 if_id,xfrm_address_t * daddr,xfrm_address_t * saddr,unsigned short family,u8 mode,u8 proto,u32 reqid)1662 xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
1663 xfrm_address_t *daddr, xfrm_address_t *saddr,
1664 unsigned short family, u8 mode, u8 proto, u32 reqid)
1665 {
1666 unsigned int h;
1667 struct xfrm_state *rx = NULL, *x = NULL;
1668
1669 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1670 h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
1671 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1672 if (x->props.family == family &&
1673 x->props.reqid == reqid &&
1674 (mark & x->mark.m) == x->mark.v &&
1675 x->if_id == if_id &&
1676 !(x->props.flags & XFRM_STATE_WILDRECV) &&
1677 xfrm_state_addr_check(x, daddr, saddr, family) &&
1678 mode == x->props.mode &&
1679 proto == x->id.proto &&
1680 x->km.state == XFRM_STATE_VALID) {
1681 rx = x;
1682 break;
1683 }
1684 }
1685
1686 if (rx)
1687 xfrm_state_hold(rx);
1688 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1689
1690
1691 return rx;
1692 }
1693 EXPORT_SYMBOL(xfrm_stateonly_find);
1694
xfrm_state_lookup_byspi(struct net * net,__be32 spi,unsigned short family)1695 struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
1696 unsigned short family)
1697 {
1698 struct xfrm_state *x;
1699 struct xfrm_state_walk *w;
1700
1701 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1702 list_for_each_entry(w, &net->xfrm.state_all, all) {
1703 x = container_of(w, struct xfrm_state, km);
1704 if (x->props.family != family ||
1705 x->id.spi != spi)
1706 continue;
1707
1708 xfrm_state_hold(x);
1709 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1710 return x;
1711 }
1712 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1713 return NULL;
1714 }
1715 EXPORT_SYMBOL(xfrm_state_lookup_byspi);
1716
__xfrm_state_insert(struct xfrm_state * x)1717 static void __xfrm_state_insert(struct xfrm_state *x)
1718 {
1719 struct net *net = xs_net(x);
1720 unsigned int h;
1721
1722 list_add(&x->km.all, &net->xfrm.state_all);
1723
1724 h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
1725 x->props.reqid, x->props.family);
1726 XFRM_STATE_INSERT(bydst, &x->bydst, net->xfrm.state_bydst + h,
1727 x->xso.type);
1728
1729 h = xfrm_src_hash(net, &x->id.daddr, &x->props.saddr, x->props.family);
1730 XFRM_STATE_INSERT(bysrc, &x->bysrc, net->xfrm.state_bysrc + h,
1731 x->xso.type);
1732
1733 if (x->id.spi) {
1734 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto,
1735 x->props.family);
1736
1737 XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h,
1738 x->xso.type);
1739 }
1740
1741 if (x->km.seq) {
1742 h = xfrm_seq_hash(net, x->km.seq);
1743
1744 XFRM_STATE_INSERT(byseq, &x->byseq, net->xfrm.state_byseq + h,
1745 x->xso.type);
1746 }
1747
1748 hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
1749 if (x->replay_maxage)
1750 mod_timer(&x->rtimer, jiffies + x->replay_maxage);
1751
1752 net->xfrm.state_num++;
1753
1754 xfrm_hash_grow_check(net, x->bydst.next != NULL);
1755 xfrm_nat_keepalive_state_updated(x);
1756 }
1757
1758 /* net->xfrm.xfrm_state_lock is held */
__xfrm_state_bump_genids(struct xfrm_state * xnew)1759 static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
1760 {
1761 struct net *net = xs_net(xnew);
1762 unsigned short family = xnew->props.family;
1763 u32 reqid = xnew->props.reqid;
1764 struct xfrm_state *x;
1765 unsigned int h;
1766 u32 mark = xnew->mark.v & xnew->mark.m;
1767 u32 if_id = xnew->if_id;
1768 u32 cpu_id = xnew->pcpu_num;
1769
1770 h = xfrm_dst_hash(net, &xnew->id.daddr, &xnew->props.saddr, reqid, family);
1771 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1772 if (x->props.family == family &&
1773 x->props.reqid == reqid &&
1774 x->if_id == if_id &&
1775 x->pcpu_num == cpu_id &&
1776 (mark & x->mark.m) == x->mark.v &&
1777 xfrm_addr_equal(&x->id.daddr, &xnew->id.daddr, family) &&
1778 xfrm_addr_equal(&x->props.saddr, &xnew->props.saddr, family))
1779 x->genid++;
1780 }
1781 }
1782
xfrm_state_insert(struct xfrm_state * x)1783 void xfrm_state_insert(struct xfrm_state *x)
1784 {
1785 struct net *net = xs_net(x);
1786
1787 spin_lock_bh(&net->xfrm.xfrm_state_lock);
1788 __xfrm_state_bump_genids(x);
1789 __xfrm_state_insert(x);
1790 spin_unlock_bh(&net->xfrm.xfrm_state_lock);
1791 }
1792 EXPORT_SYMBOL(xfrm_state_insert);
1793
1794 /* net->xfrm.xfrm_state_lock is held */
__find_acq_core(struct net * net,const struct xfrm_mark * m,unsigned short family,u8 mode,u32 reqid,u32 if_id,u32 pcpu_num,u8 proto,const xfrm_address_t * daddr,const xfrm_address_t * saddr,int create)1795 static struct xfrm_state *__find_acq_core(struct net *net,
1796 const struct xfrm_mark *m,
1797 unsigned short family, u8 mode,
1798 u32 reqid, u32 if_id, u32 pcpu_num, u8 proto,
1799 const xfrm_address_t *daddr,
1800 const xfrm_address_t *saddr,
1801 int create)
1802 {
1803 unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
1804 struct xfrm_state *x;
1805 u32 mark = m->v & m->m;
1806
1807 hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
1808 if (x->props.reqid != reqid ||
1809 x->props.mode != mode ||
1810 x->props.family != family ||
1811 x->km.state != XFRM_STATE_ACQ ||
1812 x->id.spi != 0 ||
1813 x->id.proto != proto ||
1814 (mark & x->mark.m) != x->mark.v ||
1815 x->pcpu_num != pcpu_num ||
1816 !xfrm_addr_equal(&x->id.daddr, daddr, family) ||
1817 !xfrm_addr_equal(&x->props.saddr, saddr, family))
1818 continue;
1819
1820 xfrm_state_hold(x);
1821 return x;
1822 }
1823
1824 if (!create)
1825 return NULL;
1826
1827 x = xfrm_state_alloc(net);
1828 if (likely(x)) {
1829 switch (family) {
1830 case AF_INET:
1831 x->sel.daddr.a4 = daddr->a4;
1832 x->sel.saddr.a4 = saddr->a4;
1833 x->sel.prefixlen_d = 32;
1834 x->sel.prefixlen_s = 32;
1835 x->props.saddr.a4 = saddr->a4;
1836 x->id.daddr.a4 = daddr->a4;
1837 break;
1838
1839 case AF_INET6:
1840 x->sel.daddr.in6 = daddr->in6;
1841 x->sel.saddr.in6 = saddr->in6;
1842 x->sel.prefixlen_d = 128;
1843 x->sel.prefixlen_s = 128;
1844 x->props.saddr.in6 = saddr->in6;
1845 x->id.daddr.in6 = daddr->in6;
1846 break;
1847 }
1848
1849 x->pcpu_num = pcpu_num;
1850 x->km.state = XFRM_STATE_ACQ;
1851 x->id.proto = proto;
1852 x->props.family = family;
1853 x->props.mode = mode;
1854 x->props.reqid = reqid;
1855 x->if_id = if_id;
1856 x->mark.v = m->v;
1857 x->mark.m = m->m;
1858 x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
1859 xfrm_state_hold(x);
1860 hrtimer_start(&x->mtimer,
1861 ktime_set(net->xfrm.sysctl_acq_expires, 0),
1862 HRTIMER_MODE_REL_SOFT);
1863 list_add(&x->km.all, &net->xfrm.state_all);
1864 XFRM_STATE_INSERT(bydst, &x->bydst, net->xfrm.state_bydst + h,
1865 x->xso.type);
1866 h = xfrm_src_hash(net, daddr, saddr, family);
1867 XFRM_STATE_INSERT(bysrc, &x->bysrc, net->xfrm.state_bysrc + h,
1868 x->xso.type);
1869
1870 net->xfrm.state_num++;
1871
1872 xfrm_hash_grow_check(net, x->bydst.next != NULL);
1873 }
1874
1875 return x;
1876 }

static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num);

int xfrm_state_add(struct xfrm_state *x)
{
        struct net *net = xs_net(x);
        struct xfrm_state *x1, *to_put;
        int family;
        int err;
        u32 mark = x->mark.v & x->mark.m;
        int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);

        family = x->props.family;

        to_put = NULL;

        spin_lock_bh(&net->xfrm.xfrm_state_lock);

        x1 = __xfrm_state_locate(x, use_spi, family);
        if (x1) {
                to_put = x1;
                x1 = NULL;
                err = -EEXIST;
                goto out;
        }

        if (use_spi && x->km.seq) {
                x1 = __xfrm_find_acq_byseq(net, mark, x->km.seq, x->pcpu_num);
                if (x1 && ((x1->id.proto != x->id.proto) ||
                    !xfrm_addr_equal(&x1->id.daddr, &x->id.daddr, family))) {
                        to_put = x1;
                        x1 = NULL;
                }
        }

        if (use_spi && !x1)
                x1 = __find_acq_core(net, &x->mark, family, x->props.mode,
                                     x->props.reqid, x->if_id, x->pcpu_num, x->id.proto,
                                     &x->id.daddr, &x->props.saddr, 0);

        __xfrm_state_bump_genids(x);
        __xfrm_state_insert(x);
        err = 0;

out:
        spin_unlock_bh(&net->xfrm.xfrm_state_lock);

        if (x1) {
                xfrm_state_delete(x1);
                xfrm_state_put(x1);
        }

        if (to_put)
                xfrm_state_put(to_put);

        return err;
}
EXPORT_SYMBOL(xfrm_state_add);
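
/*
 * Illustrative summary (not part of the original source): the add fails
 * with -EEXIST when __xfrm_state_locate() finds a fully matching SA.
 * Otherwise a larval ACQ entry matching by km.seq, or failing that by
 * the (daddr, saddr, reqid) triple, is looked up; if one is found it is
 * deleted and released once the new state has been inserted.
 */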

#ifdef CONFIG_XFRM_MIGRATE
static inline int clone_security(struct xfrm_state *x, struct xfrm_sec_ctx *security)
{
        struct xfrm_user_sec_ctx *uctx;
        int size = sizeof(*uctx) + security->ctx_len;
        int err;

        uctx = kmalloc(size, GFP_KERNEL);
        if (!uctx)
                return -ENOMEM;

        uctx->exttype = XFRMA_SEC_CTX;
        uctx->len = size;
        uctx->ctx_doi = security->ctx_doi;
        uctx->ctx_alg = security->ctx_alg;
        uctx->ctx_len = security->ctx_len;
        memcpy(uctx + 1, security->ctx_str, security->ctx_len);
        err = security_xfrm_state_alloc(x, uctx);
        kfree(uctx);
        if (err)
                return err;

        return 0;
}

static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,
                                           struct xfrm_encap_tmpl *encap)
{
        struct net *net = xs_net(orig);
        struct xfrm_state *x = xfrm_state_alloc(net);
        if (!x)
                goto out;

        memcpy(&x->id, &orig->id, sizeof(x->id));
        memcpy(&x->sel, &orig->sel, sizeof(x->sel));
        memcpy(&x->lft, &orig->lft, sizeof(x->lft));
        x->props.mode = orig->props.mode;
        x->props.replay_window = orig->props.replay_window;
        x->props.reqid = orig->props.reqid;
        x->props.family = orig->props.family;
        x->props.saddr = orig->props.saddr;

        if (orig->aalg) {
                x->aalg = xfrm_algo_auth_clone(orig->aalg);
                if (!x->aalg)
                        goto error;
        }
        x->props.aalgo = orig->props.aalgo;

        if (orig->aead) {
                x->aead = xfrm_algo_aead_clone(orig->aead);
                x->geniv = orig->geniv;
                if (!x->aead)
                        goto error;
        }
        if (orig->ealg) {
                x->ealg = xfrm_algo_clone(orig->ealg);
                if (!x->ealg)
                        goto error;
        }
        x->props.ealgo = orig->props.ealgo;

        if (orig->calg) {
                x->calg = xfrm_algo_clone(orig->calg);
                if (!x->calg)
                        goto error;
        }
        x->props.calgo = orig->props.calgo;

        if (encap || orig->encap) {
                if (encap)
                        x->encap = kmemdup(encap, sizeof(*x->encap),
                                           GFP_KERNEL);
                else
                        x->encap = kmemdup(orig->encap, sizeof(*x->encap),
                                           GFP_KERNEL);

                if (!x->encap)
                        goto error;
        }

        if (orig->security)
                if (clone_security(x, orig->security))
                        goto error;

        if (orig->coaddr) {
                x->coaddr = kmemdup(orig->coaddr, sizeof(*x->coaddr),
                                    GFP_KERNEL);
                if (!x->coaddr)
                        goto error;
        }

        if (orig->replay_esn) {
                if (xfrm_replay_clone(x, orig))
                        goto error;
        }

        memcpy(&x->mark, &orig->mark, sizeof(x->mark));
        memcpy(&x->props.smark, &orig->props.smark, sizeof(x->props.smark));

        x->props.flags = orig->props.flags;
        x->props.extra_flags = orig->props.extra_flags;

        x->pcpu_num = orig->pcpu_num;
        x->if_id = orig->if_id;
        x->tfcpad = orig->tfcpad;
        x->replay_maxdiff = orig->replay_maxdiff;
        x->replay_maxage = orig->replay_maxage;
        memcpy(&x->curlft, &orig->curlft, sizeof(x->curlft));
        x->km.state = orig->km.state;
        x->km.seq = orig->km.seq;
        x->replay = orig->replay;
        x->preplay = orig->preplay;
        x->mapping_maxage = orig->mapping_maxage;
        x->lastused = orig->lastused;
        x->new_mapping = 0;
        x->new_mapping_sport = 0;
        x->dir = orig->dir;

        x->mode_cbs = orig->mode_cbs;
        if (x->mode_cbs && x->mode_cbs->clone_state) {
                if (x->mode_cbs->clone_state(x, orig))
                        goto error;
        }

        return x;

error:
        xfrm_state_put(x);
out:
        return NULL;
}

struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net,
                                           u32 if_id)
{
        unsigned int h;
        struct xfrm_state *x = NULL;

        spin_lock_bh(&net->xfrm.xfrm_state_lock);

        if (m->reqid) {
                h = xfrm_dst_hash(net, &m->old_daddr, &m->old_saddr,
                                  m->reqid, m->old_family);
                hlist_for_each_entry(x, net->xfrm.state_bydst+h, bydst) {
                        if (x->props.mode != m->mode ||
                            x->id.proto != m->proto)
                                continue;
                        if (m->reqid && x->props.reqid != m->reqid)
                                continue;
                        if (if_id != 0 && x->if_id != if_id)
                                continue;
                        if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
                                             m->old_family) ||
                            !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
                                             m->old_family))
                                continue;
                        xfrm_state_hold(x);
                        break;
                }
        } else {
                h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr,
                                  m->old_family);
                hlist_for_each_entry(x, net->xfrm.state_bysrc+h, bysrc) {
                        if (x->props.mode != m->mode ||
                            x->id.proto != m->proto)
                                continue;
                        if (if_id != 0 && x->if_id != if_id)
                                continue;
                        if (!xfrm_addr_equal(&x->id.daddr, &m->old_daddr,
                                             m->old_family) ||
                            !xfrm_addr_equal(&x->props.saddr, &m->old_saddr,
                                             m->old_family))
                                continue;
                        xfrm_state_hold(x);
                        break;
                }
        }

        spin_unlock_bh(&net->xfrm.xfrm_state_lock);

        return x;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);

struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
                                      struct xfrm_migrate *m,
                                      struct xfrm_encap_tmpl *encap)
{
        struct xfrm_state *xc;

        xc = xfrm_state_clone(x, encap);
        if (!xc)
                return NULL;

        xc->props.family = m->new_family;

        if (xfrm_init_state(xc) < 0)
                goto error;

        memcpy(&xc->id.daddr, &m->new_daddr, sizeof(xc->id.daddr));
        memcpy(&xc->props.saddr, &m->new_saddr, sizeof(xc->props.saddr));

        /* add state */
        if (xfrm_addr_equal(&x->id.daddr, &m->new_daddr, m->new_family)) {
                /* care is needed when the destination address of the
                   state is to be updated, as it is part of the triplet */
                xfrm_state_insert(xc);
        } else {
                if (xfrm_state_add(xc) < 0)
                        goto error;
        }

        return xc;
error:
        xfrm_state_put(xc);
        return NULL;
}
EXPORT_SYMBOL(xfrm_state_migrate);
#endif

int xfrm_state_update(struct xfrm_state *x)
{
        struct xfrm_state *x1, *to_put;
        int err;
        int use_spi = xfrm_id_proto_match(x->id.proto, IPSEC_PROTO_ANY);
        struct net *net = xs_net(x);

        to_put = NULL;

        spin_lock_bh(&net->xfrm.xfrm_state_lock);
        x1 = __xfrm_state_locate(x, use_spi, x->props.family);

        err = -ESRCH;
        if (!x1)
                goto out;

        if (xfrm_state_kern(x1)) {
                to_put = x1;
                err = -EEXIST;
                goto out;
        }

        if (x1->km.state == XFRM_STATE_ACQ) {
                if (x->dir && x1->dir != x->dir)
                        goto out;

                __xfrm_state_insert(x);
                x = NULL;
        } else {
                if (x1->dir != x->dir)
                        goto out;
        }
        err = 0;

out:
        spin_unlock_bh(&net->xfrm.xfrm_state_lock);

        if (to_put)
                xfrm_state_put(to_put);

        if (err)
                return err;

        if (!x) {
                xfrm_state_delete(x1);
                xfrm_state_put(x1);
                return 0;
        }

        err = -EINVAL;
        spin_lock_bh(&x1->lock);
        if (likely(x1->km.state == XFRM_STATE_VALID)) {
                if (x->encap && x1->encap &&
                    x->encap->encap_type == x1->encap->encap_type)
                        memcpy(x1->encap, x->encap, sizeof(*x1->encap));
                else if (x->encap || x1->encap)
                        goto fail;

                if (x->coaddr && x1->coaddr) {
                        memcpy(x1->coaddr, x->coaddr, sizeof(*x1->coaddr));
                }
                if (!use_spi && memcmp(&x1->sel, &x->sel, sizeof(x1->sel)))
                        memcpy(&x1->sel, &x->sel, sizeof(x1->sel));
                memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
                x1->km.dying = 0;

                hrtimer_start(&x1->mtimer, ktime_set(1, 0),
                              HRTIMER_MODE_REL_SOFT);
                if (READ_ONCE(x1->curlft.use_time))
                        xfrm_state_check_expire(x1);

                if (x->props.smark.m || x->props.smark.v || x->if_id) {
                        spin_lock_bh(&net->xfrm.xfrm_state_lock);

                        if (x->props.smark.m || x->props.smark.v)
                                x1->props.smark = x->props.smark;

                        if (x->if_id)
                                x1->if_id = x->if_id;

                        __xfrm_state_bump_genids(x1);
                        spin_unlock_bh(&net->xfrm.xfrm_state_lock);
                }

                err = 0;
                x->km.state = XFRM_STATE_DEAD;
                __xfrm_state_put(x);
        }

fail:
        spin_unlock_bh(&x1->lock);

        xfrm_state_put(x1);

        return err;
}
EXPORT_SYMBOL(xfrm_state_update);

int xfrm_state_check_expire(struct xfrm_state *x)
{
        xfrm_dev_state_update_stats(x);

        if (!READ_ONCE(x->curlft.use_time))
                WRITE_ONCE(x->curlft.use_time, ktime_get_real_seconds());

        if (x->curlft.bytes >= x->lft.hard_byte_limit ||
            x->curlft.packets >= x->lft.hard_packet_limit) {
                x->km.state = XFRM_STATE_EXPIRED;
                hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL_SOFT);
                return -EINVAL;
        }

        if (!x->km.dying &&
            (x->curlft.bytes >= x->lft.soft_byte_limit ||
             x->curlft.packets >= x->lft.soft_packet_limit)) {
                x->km.dying = 1;
                km_state_expired(x, 0, 0);
        }
        return 0;
}
EXPORT_SYMBOL(xfrm_state_check_expire);
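
/*
 * Worked example (illustrative, not part of the original source): with
 * lft.soft_byte_limit = 900 MB and lft.hard_byte_limit = 1 GB, crossing
 * 900 MB marks the state dying and fires km_state_expired() once with
 * hard == 0 (a rekey hint; the SA keeps working), while crossing 1 GB
 * switches km.state to XFRM_STATE_EXPIRED, re-arms the timer to fire
 * immediately and makes this function return -EINVAL.
 */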

void xfrm_state_update_stats(struct net *net)
{
        struct xfrm_state *x;
        int i;

        spin_lock_bh(&net->xfrm.xfrm_state_lock);
        for (i = 0; i <= net->xfrm.state_hmask; i++) {
                hlist_for_each_entry(x, net->xfrm.state_bydst + i, bydst)
                        xfrm_dev_state_update_stats(x);
        }
        spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}

struct xfrm_state *
xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi,
                  u8 proto, unsigned short family)
{
        struct xfrm_hash_state_ptrs state_ptrs;
        struct xfrm_state *x;

        rcu_read_lock();
        xfrm_hash_ptrs_get(net, &state_ptrs);

        x = __xfrm_state_lookup(&state_ptrs, mark, daddr, spi, proto, family);
        rcu_read_unlock();
        return x;
}
EXPORT_SYMBOL(xfrm_state_lookup);

struct xfrm_state *
xfrm_state_lookup_byaddr(struct net *net, u32 mark,
                         const xfrm_address_t *daddr, const xfrm_address_t *saddr,
                         u8 proto, unsigned short family)
{
        struct xfrm_hash_state_ptrs state_ptrs;
        struct xfrm_state *x;

        rcu_read_lock();

        xfrm_hash_ptrs_get(net, &state_ptrs);

        x = __xfrm_state_lookup_byaddr(&state_ptrs, mark, daddr, saddr, proto, family);
        rcu_read_unlock();
        return x;
}
EXPORT_SYMBOL(xfrm_state_lookup_byaddr);

struct xfrm_state *
xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid,
              u32 if_id, u32 pcpu_num, u8 proto, const xfrm_address_t *daddr,
              const xfrm_address_t *saddr, int create, unsigned short family)
{
        struct xfrm_state *x;

        spin_lock_bh(&net->xfrm.xfrm_state_lock);
        x = __find_acq_core(net, mark, family, mode, reqid, if_id, pcpu_num,
                            proto, daddr, saddr, create);
        spin_unlock_bh(&net->xfrm.xfrm_state_lock);

        return x;
}
EXPORT_SYMBOL(xfrm_find_acq);

#ifdef CONFIG_XFRM_SUB_POLICY
#if IS_ENABLED(CONFIG_IPV6)
/* distribution counting sort function for xfrm_state and xfrm_tmpl */
static void
__xfrm6_sort(void **dst, void **src, int n,
             int (*cmp)(const void *p), int maxclass)
{
        int count[XFRM_MAX_DEPTH] = { };
        int class[XFRM_MAX_DEPTH];
        int i;

        for (i = 0; i < n; i++) {
                int c = cmp(src[i]);

                class[i] = c;
                count[c]++;
        }

        for (i = 2; i < maxclass; i++)
                count[i] += count[i - 1];

        for (i = 0; i < n; i++) {
                dst[count[class[i] - 1]++] = src[i];
                src[i] = NULL;
        }
}
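
/*
 * Worked example (illustrative, not part of the original source): for
 * classes cmp(src[i]) = {4, 1, 4, 2} and maxclass = 5, the counts come
 * out as count[1..4] = {1, 1, 0, 2} and, after the prefix sums,
 * {1, 2, 2, 4}.  Each element is then placed at dst[count[class - 1]++],
 * so the class-1 item lands in slot 0, the class-2 item in slot 1 and
 * the class-4 items in slots 2-3, giving dst = {src[1], src[3], src[0],
 * src[2]}: a stable counting sort in O(n + maxclass).
 */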

/* Rule for xfrm_state:
 *
 * rule 1: select IPsec transport except AH
 * rule 2: select MIPv6 RO or inbound trigger
 * rule 3: select IPsec transport AH
 * rule 4: select IPsec tunnel
 * rule 5: others
 */
static int __xfrm6_state_sort_cmp(const void *p)
{
        const struct xfrm_state *v = p;

        switch (v->props.mode) {
        case XFRM_MODE_TRANSPORT:
                if (v->id.proto != IPPROTO_AH)
                        return 1;
                else
                        return 3;
#if IS_ENABLED(CONFIG_IPV6_MIP6)
        case XFRM_MODE_ROUTEOPTIMIZATION:
        case XFRM_MODE_IN_TRIGGER:
                return 2;
#endif
        case XFRM_MODE_TUNNEL:
        case XFRM_MODE_BEET:
        case XFRM_MODE_IPTFS:
                return 4;
        }
        return 5;
}

/* Rule for xfrm_tmpl:
 *
 * rule 1: select IPsec transport
 * rule 2: select MIPv6 RO or inbound trigger
 * rule 3: select IPsec tunnel
 * rule 4: others
 */
static int __xfrm6_tmpl_sort_cmp(const void *p)
{
        const struct xfrm_tmpl *v = p;

        switch (v->mode) {
        case XFRM_MODE_TRANSPORT:
                return 1;
#if IS_ENABLED(CONFIG_IPV6_MIP6)
        case XFRM_MODE_ROUTEOPTIMIZATION:
        case XFRM_MODE_IN_TRIGGER:
                return 2;
#endif
        case XFRM_MODE_TUNNEL:
        case XFRM_MODE_BEET:
        case XFRM_MODE_IPTFS:
                return 3;
        }
        return 4;
}
#else
static inline int __xfrm6_state_sort_cmp(const void *p) { return 5; }
static inline int __xfrm6_tmpl_sort_cmp(const void *p) { return 4; }

static inline void
__xfrm6_sort(void **dst, void **src, int n,
             int (*cmp)(const void *p), int maxclass)
{
        int i;

        for (i = 0; i < n; i++)
                dst[i] = src[i];
}
#endif /* CONFIG_IPV6 */

void
xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
               unsigned short family)
{
        int i;

        if (family == AF_INET6)
                __xfrm6_sort((void **)dst, (void **)src, n,
                             __xfrm6_tmpl_sort_cmp, 5);
        else
                for (i = 0; i < n; i++)
                        dst[i] = src[i];
}

void
xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
                unsigned short family)
{
        int i;

        if (family == AF_INET6)
                __xfrm6_sort((void **)dst, (void **)src, n,
                             __xfrm6_state_sort_cmp, 6);
        else
                for (i = 0; i < n; i++)
                        dst[i] = src[i];
}
#endif

/* Silly enough, but too lazy to build a resolution list */

static struct xfrm_state *__xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num)
{
        unsigned int h = xfrm_seq_hash(net, seq);
        struct xfrm_state *x;

        hlist_for_each_entry_rcu(x, net->xfrm.state_byseq + h, byseq) {
                if (x->km.seq == seq &&
                    (mark & x->mark.m) == x->mark.v &&
                    x->pcpu_num == pcpu_num &&
                    x->km.state == XFRM_STATE_ACQ) {
                        xfrm_state_hold(x);
                        return x;
                }
        }

        return NULL;
}

struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num)
{
        struct xfrm_state *x;

        spin_lock_bh(&net->xfrm.xfrm_state_lock);
        x = __xfrm_find_acq_byseq(net, mark, seq, pcpu_num);
        spin_unlock_bh(&net->xfrm.xfrm_state_lock);
        return x;
}
EXPORT_SYMBOL(xfrm_find_acq_byseq);

u32 xfrm_get_acqseq(void)
{
        u32 res;
        static atomic_t acqseq;

        do {
                res = atomic_inc_return(&acqseq);
        } while (!res);

        return res;
}
EXPORT_SYMBOL(xfrm_get_acqseq);
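
/*
 * Illustrative note (not part of the original source): a km.seq of zero
 * is treated as "unset" by lookups such as xfrm_state_add() above, so
 * the loop simply retries when the 32-bit counter wraps to zero.
 */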

int verify_spi_info(u8 proto, u32 min, u32 max, struct netlink_ext_ack *extack)
{
        switch (proto) {
        case IPPROTO_AH:
        case IPPROTO_ESP:
                break;

        case IPPROTO_COMP:
                /* IPCOMP SPI is 16 bits. */
                if (max >= 0x10000) {
                        NL_SET_ERR_MSG(extack, "IPCOMP SPI must be <= 65535");
                        return -EINVAL;
                }
                break;

        default:
                NL_SET_ERR_MSG(extack, "Invalid protocol, must be one of AH, ESP, IPCOMP");
                return -EINVAL;
        }

        if (min > max) {
                NL_SET_ERR_MSG(extack, "Invalid SPI range: min > max");
                return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL(verify_spi_info);
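
/*
 * Example (illustrative, not part of the original source): for
 * IPPROTO_COMP the "SPI" is really a 16-bit IPComp CPI, so a range such
 * as min = 0x100, max = 0xffff passes while max = 0x10000 is rejected;
 * for AH and ESP any min <= max within the full 32-bit space is fine.
 */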

int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high,
                   struct netlink_ext_ack *extack)
{
        struct net *net = xs_net(x);
        unsigned int h;
        struct xfrm_state *x0;
        int err = -ENOENT;
        __be32 minspi = htonl(low);
        __be32 maxspi = htonl(high);
        __be32 newspi = 0;
        u32 mark = x->mark.v & x->mark.m;

        spin_lock_bh(&x->lock);
        if (x->km.state == XFRM_STATE_DEAD) {
                NL_SET_ERR_MSG(extack, "Target ACQUIRE is in DEAD state");
                goto unlock;
        }

        err = 0;
        if (x->id.spi)
                goto unlock;

        err = -ENOENT;

        if (minspi == maxspi) {
                x0 = xfrm_state_lookup(net, mark, &x->id.daddr, minspi, x->id.proto, x->props.family);
                if (x0) {
                        NL_SET_ERR_MSG(extack, "Requested SPI is already in use");
                        xfrm_state_put(x0);
                        goto unlock;
                }
                newspi = minspi;
        } else {
                u32 spi = 0;
                for (h = 0; h < high-low+1; h++) {
                        spi = get_random_u32_inclusive(low, high);
                        x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family);
                        if (x0 == NULL) {
                                newspi = htonl(spi);
                                break;
                        }
                        xfrm_state_put(x0);
                }
        }
        if (newspi) {
                spin_lock_bh(&net->xfrm.xfrm_state_lock);
                x->id.spi = newspi;
                h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family);
                XFRM_STATE_INSERT(byspi, &x->byspi, net->xfrm.state_byspi + h,
                                  x->xso.type);
                spin_unlock_bh(&net->xfrm.xfrm_state_lock);

                err = 0;
        } else {
                NL_SET_ERR_MSG(extack, "No SPI available in the requested range");
        }

unlock:
        spin_unlock_bh(&x->lock);

        return err;
}
EXPORT_SYMBOL(xfrm_alloc_spi);
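
/*
 * Illustrative note (not part of the original source): when low == high
 * the single requested SPI is checked exactly once; otherwise up to
 * high - low + 1 values are drawn at random from [low, high].  The
 * probing is sampling with replacement, so on a small, mostly used
 * range the loop may report -ENOENT even though a free SPI still
 * exists.
 */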

static bool __xfrm_state_filter_match(struct xfrm_state *x,
                                      struct xfrm_address_filter *filter)
{
        if (filter) {
                if ((filter->family == AF_INET ||
                     filter->family == AF_INET6) &&
                    x->props.family != filter->family)
                        return false;

                return addr_match(&x->props.saddr, &filter->saddr,
                                  filter->splen) &&
                       addr_match(&x->id.daddr, &filter->daddr,
                                  filter->dplen);
        }
        return true;
}

int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
                    int (*func)(struct xfrm_state *, int, void*),
                    void *data)
{
        struct xfrm_state *state;
        struct xfrm_state_walk *x;
        int err = 0;

        if (walk->seq != 0 && list_empty(&walk->all))
                return 0;

        spin_lock_bh(&net->xfrm.xfrm_state_lock);
        if (list_empty(&walk->all))
                x = list_first_entry(&net->xfrm.state_all, struct xfrm_state_walk, all);
        else
                x = list_first_entry(&walk->all, struct xfrm_state_walk, all);
        list_for_each_entry_from(x, &net->xfrm.state_all, all) {
                if (x->state == XFRM_STATE_DEAD)
                        continue;
                state = container_of(x, struct xfrm_state, km);
                if (!xfrm_id_proto_match(state->id.proto, walk->proto))
                        continue;
                if (!__xfrm_state_filter_match(state, walk->filter))
                        continue;
                err = func(state, walk->seq, data);
                if (err) {
                        list_move_tail(&walk->all, &x->all);
                        goto out;
                }
                walk->seq++;
        }
        if (walk->seq == 0) {
                err = -ENOENT;
                goto out;
        }
        list_del_init(&walk->all);
out:
        spin_unlock_bh(&net->xfrm.xfrm_state_lock);
        return err;
}
EXPORT_SYMBOL(xfrm_state_walk);
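
/*
 * Example caller pattern (illustrative only; dump_one() and ctx are
 * hypothetical, not part of this file):
 *
 *      struct xfrm_state_walk walk;
 *
 *      xfrm_state_walk_init(&walk, IPSEC_PROTO_ANY, NULL);
 *      err = xfrm_state_walk(net, &walk, dump_one, &ctx);
 *      xfrm_state_walk_done(&walk, net);
 *
 * A non-zero return from the callback pauses the dump: the walker is
 * re-linked into state_all at the current position via list_move_tail()
 * above, so a subsequent call resumes right where the previous one
 * stopped.
 */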

void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
                          struct xfrm_address_filter *filter)
{
        INIT_LIST_HEAD(&walk->all);
        walk->proto = proto;
        walk->state = XFRM_STATE_DEAD;
        walk->seq = 0;
        walk->filter = filter;
}
EXPORT_SYMBOL(xfrm_state_walk_init);

void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net)
{
        kfree(walk->filter);

        if (list_empty(&walk->all))
                return;

        spin_lock_bh(&net->xfrm.xfrm_state_lock);
        list_del(&walk->all);
        spin_unlock_bh(&net->xfrm.xfrm_state_lock);
}
EXPORT_SYMBOL(xfrm_state_walk_done);

static void xfrm_replay_timer_handler(struct timer_list *t)
{
        struct xfrm_state *x = from_timer(x, t, rtimer);

        spin_lock(&x->lock);

        if (x->km.state == XFRM_STATE_VALID) {
                if (xfrm_aevent_is_on(xs_net(x)))
                        xfrm_replay_notify(x, XFRM_REPLAY_TIMEOUT);
                else
                        x->xflags |= XFRM_TIME_DEFER;
        }

        spin_unlock(&x->lock);
}

static LIST_HEAD(xfrm_km_list);

void km_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
        struct xfrm_mgr *km;

        rcu_read_lock();
        list_for_each_entry_rcu(km, &xfrm_km_list, list)
                if (km->notify_policy)
                        km->notify_policy(xp, dir, c);
        rcu_read_unlock();
}

void km_state_notify(struct xfrm_state *x, const struct km_event *c)
{
        struct xfrm_mgr *km;
        rcu_read_lock();
        list_for_each_entry_rcu(km, &xfrm_km_list, list)
                if (km->notify)
                        km->notify(x, c);
        rcu_read_unlock();
}

EXPORT_SYMBOL(km_policy_notify);
EXPORT_SYMBOL(km_state_notify);

void km_state_expired(struct xfrm_state *x, int hard, u32 portid)
{
        struct km_event c;

        c.data.hard = hard;
        c.portid = portid;
        c.event = XFRM_MSG_EXPIRE;
        km_state_notify(x, &c);
}

EXPORT_SYMBOL(km_state_expired);
/*
 * We send to all registered managers regardless of failure;
 * we are happy with one success.
 */
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol)
{
        int err = -EINVAL, acqret;
        struct xfrm_mgr *km;

        rcu_read_lock();
        list_for_each_entry_rcu(km, &xfrm_km_list, list) {
                acqret = km->acquire(x, t, pol);
                if (!acqret)
                        err = acqret;
        }
        rcu_read_unlock();
        return err;
}
EXPORT_SYMBOL(km_query);

static int __km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
{
        int err = -EINVAL;
        struct xfrm_mgr *km;

        rcu_read_lock();
        list_for_each_entry_rcu(km, &xfrm_km_list, list) {
                if (km->new_mapping)
                        err = km->new_mapping(x, ipaddr, sport);
                if (!err)
                        break;
        }
        rcu_read_unlock();
        return err;
}

int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport)
{
        int ret = 0;

        if (x->mapping_maxage) {
                if ((jiffies / HZ - x->new_mapping) > x->mapping_maxage ||
                    x->new_mapping_sport != sport) {
                        x->new_mapping_sport = sport;
                        x->new_mapping = jiffies / HZ;
                        ret = __km_new_mapping(x, ipaddr, sport);
                }
        } else {
                ret = __km_new_mapping(x, ipaddr, sport);
        }

        return ret;
}
EXPORT_SYMBOL(km_new_mapping);
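
/*
 * Worked example (illustrative, not part of the original source): with
 * x->mapping_maxage = 60, a NAT mapping change is announced to the key
 * managers only if the source port differs from the last one recorded
 * or at least 60 seconds (measured in jiffies / HZ) have passed since
 * the previous announcement; with mapping_maxage == 0 every call
 * notifies.
 */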

void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid)
{
        struct km_event c;

        c.data.hard = hard;
        c.portid = portid;
        c.event = XFRM_MSG_POLEXPIRE;
        km_policy_notify(pol, dir, &c);
}
EXPORT_SYMBOL(km_policy_expired);

#ifdef CONFIG_XFRM_MIGRATE
int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
               const struct xfrm_migrate *m, int num_migrate,
               const struct xfrm_kmaddress *k,
               const struct xfrm_encap_tmpl *encap)
{
        int err = -EINVAL;
        int ret;
        struct xfrm_mgr *km;

        rcu_read_lock();
        list_for_each_entry_rcu(km, &xfrm_km_list, list) {
                if (km->migrate) {
                        ret = km->migrate(sel, dir, type, m, num_migrate, k,
                                          encap);
                        if (!ret)
                                err = ret;
                }
        }
        rcu_read_unlock();
        return err;
}
EXPORT_SYMBOL(km_migrate);
#endif

int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr)
{
        int err = -EINVAL;
        int ret;
        struct xfrm_mgr *km;

        rcu_read_lock();
        list_for_each_entry_rcu(km, &xfrm_km_list, list) {
                if (km->report) {
                        ret = km->report(net, proto, sel, addr);
                        if (!ret)
                                err = ret;
                }
        }
        rcu_read_unlock();
        return err;
}
EXPORT_SYMBOL(km_report);

static bool km_is_alive(const struct km_event *c)
{
        struct xfrm_mgr *km;
        bool is_alive = false;

        rcu_read_lock();
        list_for_each_entry_rcu(km, &xfrm_km_list, list) {
                if (km->is_alive && km->is_alive(c)) {
                        is_alive = true;
                        break;
                }
        }
        rcu_read_unlock();

        return is_alive;
}

#if IS_ENABLED(CONFIG_XFRM_USER_COMPAT)
static DEFINE_SPINLOCK(xfrm_translator_lock);
static struct xfrm_translator __rcu *xfrm_translator;

struct xfrm_translator *xfrm_get_translator(void)
{
        struct xfrm_translator *xtr;

        rcu_read_lock();
        xtr = rcu_dereference(xfrm_translator);
        if (unlikely(!xtr))
                goto out;
        if (!try_module_get(xtr->owner))
                xtr = NULL;
out:
        rcu_read_unlock();
        return xtr;
}
EXPORT_SYMBOL_GPL(xfrm_get_translator);

void xfrm_put_translator(struct xfrm_translator *xtr)
{
        module_put(xtr->owner);
}
EXPORT_SYMBOL_GPL(xfrm_put_translator);

int xfrm_register_translator(struct xfrm_translator *xtr)
{
        int err = 0;

        spin_lock_bh(&xfrm_translator_lock);
        if (unlikely(xfrm_translator != NULL))
                err = -EEXIST;
        else
                rcu_assign_pointer(xfrm_translator, xtr);
        spin_unlock_bh(&xfrm_translator_lock);

        return err;
}
EXPORT_SYMBOL_GPL(xfrm_register_translator);

int xfrm_unregister_translator(struct xfrm_translator *xtr)
{
        int err = 0;

        spin_lock_bh(&xfrm_translator_lock);
        if (likely(xfrm_translator != NULL)) {
                if (rcu_access_pointer(xfrm_translator) != xtr)
                        err = -EINVAL;
                else
                        RCU_INIT_POINTER(xfrm_translator, NULL);
        }
        spin_unlock_bh(&xfrm_translator_lock);
        synchronize_rcu();

        return err;
}
EXPORT_SYMBOL_GPL(xfrm_unregister_translator);
#endif

int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval, int optlen)
{
        int err;
        u8 *data;
        struct xfrm_mgr *km;
        struct xfrm_policy *pol = NULL;

        if (sockptr_is_null(optval) && !optlen) {
                xfrm_sk_policy_insert(sk, XFRM_POLICY_IN, NULL);
                xfrm_sk_policy_insert(sk, XFRM_POLICY_OUT, NULL);
                __sk_dst_reset(sk);
                return 0;
        }

        if (optlen <= 0 || optlen > PAGE_SIZE)
                return -EMSGSIZE;

        data = memdup_sockptr(optval, optlen);
        if (IS_ERR(data))
                return PTR_ERR(data);

        if (in_compat_syscall()) {
                struct xfrm_translator *xtr = xfrm_get_translator();

                if (!xtr) {
                        kfree(data);
                        return -EOPNOTSUPP;
                }

                err = xtr->xlate_user_policy_sockptr(&data, optlen);
                xfrm_put_translator(xtr);
                if (err) {
                        kfree(data);
                        return err;
                }
        }

        err = -EINVAL;
        rcu_read_lock();
        list_for_each_entry_rcu(km, &xfrm_km_list, list) {
                pol = km->compile_policy(sk, optname, data,
                                         optlen, &err);
                if (err >= 0)
                        break;
        }
        rcu_read_unlock();

        if (err >= 0) {
                xfrm_sk_policy_insert(sk, err, pol);
                xfrm_pol_put(pol);
                __sk_dst_reset(sk);
                err = 0;
        }

        kfree(data);
        return err;
}
EXPORT_SYMBOL(xfrm_user_policy);

static DEFINE_SPINLOCK(xfrm_km_lock);

void xfrm_register_km(struct xfrm_mgr *km)
{
        spin_lock_bh(&xfrm_km_lock);
        list_add_tail_rcu(&km->list, &xfrm_km_list);
        spin_unlock_bh(&xfrm_km_lock);
}
EXPORT_SYMBOL(xfrm_register_km);

void xfrm_unregister_km(struct xfrm_mgr *km)
{
        spin_lock_bh(&xfrm_km_lock);
        list_del_rcu(&km->list);
        spin_unlock_bh(&xfrm_km_lock);
        synchronize_rcu();
}
EXPORT_SYMBOL(xfrm_unregister_km);

int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
{
        int err = 0;

        if (WARN_ON(afinfo->family >= NPROTO))
                return -EAFNOSUPPORT;

        spin_lock_bh(&xfrm_state_afinfo_lock);
        if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
                err = -EEXIST;
        else
                rcu_assign_pointer(xfrm_state_afinfo[afinfo->family], afinfo);
        spin_unlock_bh(&xfrm_state_afinfo_lock);
        return err;
}
EXPORT_SYMBOL(xfrm_state_register_afinfo);

int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
{
        int err = 0, family = afinfo->family;

        if (WARN_ON(family >= NPROTO))
                return -EAFNOSUPPORT;

        spin_lock_bh(&xfrm_state_afinfo_lock);
        if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
                if (rcu_access_pointer(xfrm_state_afinfo[family]) != afinfo)
                        err = -EINVAL;
                else
                        RCU_INIT_POINTER(xfrm_state_afinfo[afinfo->family], NULL);
        }
        spin_unlock_bh(&xfrm_state_afinfo_lock);
        synchronize_rcu();
        return err;
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);

struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family)
{
        if (unlikely(family >= NPROTO))
                return NULL;

        return rcu_dereference(xfrm_state_afinfo[family]);
}
EXPORT_SYMBOL_GPL(xfrm_state_afinfo_get_rcu);

struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
{
        struct xfrm_state_afinfo *afinfo;
        if (unlikely(family >= NPROTO))
                return NULL;
        rcu_read_lock();
        afinfo = rcu_dereference(xfrm_state_afinfo[family]);
        if (unlikely(!afinfo))
                rcu_read_unlock();
        return afinfo;
}

void xfrm_flush_gc(void)
{
        flush_work(&xfrm_state_gc_work);
}
EXPORT_SYMBOL(xfrm_flush_gc);

/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
        if (x->tunnel) {
                struct xfrm_state *t = x->tunnel;

                if (atomic_read(&t->tunnel_users) == 2)
                        xfrm_state_delete(t);
                atomic_dec(&t->tunnel_users);
                xfrm_state_put_sync(t);
                x->tunnel = NULL;
        }
}
EXPORT_SYMBOL(xfrm_state_delete_tunnel);

u32 xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
        const struct xfrm_type *type = READ_ONCE(x->type);
        struct crypto_aead *aead;
        u32 blksize, net_adj = 0;

        if (x->km.state != XFRM_STATE_VALID ||
            !type || type->proto != IPPROTO_ESP)
                return mtu - x->props.header_len;

        aead = x->data;
        blksize = ALIGN(crypto_aead_blocksize(aead), 4);

        switch (x->props.mode) {
        case XFRM_MODE_TRANSPORT:
        case XFRM_MODE_BEET:
                if (x->props.family == AF_INET)
                        net_adj = sizeof(struct iphdr);
                else if (x->props.family == AF_INET6)
                        net_adj = sizeof(struct ipv6hdr);
                break;
        case XFRM_MODE_TUNNEL:
                break;
        default:
                if (x->mode_cbs && x->mode_cbs->get_inner_mtu)
                        return x->mode_cbs->get_inner_mtu(x, mtu);

                WARN_ON_ONCE(1);
                break;
        }

        return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
                 net_adj) & ~(blksize - 1)) + net_adj - 2;
}
EXPORT_SYMBOL_GPL(xfrm_state_mtu);
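
/*
 * Worked example (illustrative, not part of the original source):
 * assume, purely for illustration, an ESP transport-mode SA over IPv4
 * with mtu = 1500, props.header_len = 24, crypto_aead_authsize() = 12
 * and a block size that aligns to 16.  Then net_adj = 20 (struct iphdr)
 * and ((1500 - 24 - 12 - 20) & ~15) + 20 - 2 = 1440 + 18 = 1458; the
 * trailing "- 2" reserves the ESP trailer's pad-length and next-header
 * bytes.
 */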

int __xfrm_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
        const struct xfrm_mode *inner_mode;
        const struct xfrm_mode *outer_mode;
        int family = x->props.family;
        int err;

        if (family == AF_INET &&
            READ_ONCE(xs_net(x)->ipv4.sysctl_ip_no_pmtu_disc))
                x->props.flags |= XFRM_STATE_NOPMTUDISC;

        err = -EPROTONOSUPPORT;

        if (x->sel.family != AF_UNSPEC) {
                inner_mode = xfrm_get_mode(x->props.mode, x->sel.family);
                if (inner_mode == NULL) {
                        NL_SET_ERR_MSG(extack, "Requested mode not found");
                        goto error;
                }

                if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL) &&
                    family != x->sel.family) {
                        NL_SET_ERR_MSG(extack, "Only tunnel modes can accommodate a change of family");
                        goto error;
                }

                x->inner_mode = *inner_mode;
        } else {
                const struct xfrm_mode *inner_mode_iaf;
                int iafamily = AF_INET;

                inner_mode = xfrm_get_mode(x->props.mode, x->props.family);
                if (inner_mode == NULL) {
                        NL_SET_ERR_MSG(extack, "Requested mode not found");
                        goto error;
                }

                x->inner_mode = *inner_mode;

                if (x->props.family == AF_INET)
                        iafamily = AF_INET6;

                inner_mode_iaf = xfrm_get_mode(x->props.mode, iafamily);
                if (inner_mode_iaf) {
                        if (inner_mode_iaf->flags & XFRM_MODE_FLAG_TUNNEL)
                                x->inner_mode_iaf = *inner_mode_iaf;
                }
        }

        x->type = xfrm_get_type(x->id.proto, family);
        if (x->type == NULL) {
                NL_SET_ERR_MSG(extack, "Requested type not found");
                goto error;
        }

        err = x->type->init_state(x, extack);
        if (err)
                goto error;

        outer_mode = xfrm_get_mode(x->props.mode, family);
        if (!outer_mode) {
                NL_SET_ERR_MSG(extack, "Requested mode not found");
                err = -EPROTONOSUPPORT;
                goto error;
        }

        x->outer_mode = *outer_mode;
        if (x->nat_keepalive_interval) {
                if (x->dir != XFRM_SA_DIR_OUT) {
                        NL_SET_ERR_MSG(extack, "NAT keepalive is only supported for outbound SAs");
                        err = -EINVAL;
                        goto error;
                }

                if (!x->encap || x->encap->encap_type != UDP_ENCAP_ESPINUDP) {
                        NL_SET_ERR_MSG(extack,
                                       "NAT keepalive is only supported for UDP encapsulation");
                        err = -EINVAL;
                        goto error;
                }
        }

        x->mode_cbs = xfrm_get_mode_cbs(x->props.mode);
        if (x->mode_cbs) {
                if (x->mode_cbs->init_state)
                        err = x->mode_cbs->init_state(x);
                module_put(x->mode_cbs->owner);
        }
error:
        return err;
}

EXPORT_SYMBOL(__xfrm_init_state);

int xfrm_init_state(struct xfrm_state *x)
{
        int err;

        err = __xfrm_init_state(x, NULL);
        if (err)
                return err;

        err = xfrm_init_replay(x, NULL);
        if (err)
                return err;

        x->km.state = XFRM_STATE_VALID;
        return 0;
}

EXPORT_SYMBOL(xfrm_init_state);

int __net_init xfrm_state_init(struct net *net)
{
        unsigned int sz;

        if (net_eq(net, &init_net))
                xfrm_state_cache = KMEM_CACHE(xfrm_state,
                                              SLAB_HWCACHE_ALIGN | SLAB_PANIC);

        INIT_LIST_HEAD(&net->xfrm.state_all);

        sz = sizeof(struct hlist_head) * 8;

        net->xfrm.state_bydst = xfrm_hash_alloc(sz);
        if (!net->xfrm.state_bydst)
                goto out_bydst;
        net->xfrm.state_bysrc = xfrm_hash_alloc(sz);
        if (!net->xfrm.state_bysrc)
                goto out_bysrc;
        net->xfrm.state_byspi = xfrm_hash_alloc(sz);
        if (!net->xfrm.state_byspi)
                goto out_byspi;
        net->xfrm.state_byseq = xfrm_hash_alloc(sz);
        if (!net->xfrm.state_byseq)
                goto out_byseq;

        net->xfrm.state_cache_input = alloc_percpu(struct hlist_head);
        if (!net->xfrm.state_cache_input)
                goto out_state_cache_input;

        net->xfrm.state_hmask = ((sz / sizeof(struct hlist_head)) - 1);

        net->xfrm.state_num = 0;
        INIT_WORK(&net->xfrm.state_hash_work, xfrm_hash_resize);
        spin_lock_init(&net->xfrm.xfrm_state_lock);
        seqcount_spinlock_init(&net->xfrm.xfrm_state_hash_generation,
                               &net->xfrm.xfrm_state_lock);
        return 0;

out_state_cache_input:
        xfrm_hash_free(net->xfrm.state_byseq, sz);
out_byseq:
        xfrm_hash_free(net->xfrm.state_byspi, sz);
out_byspi:
        xfrm_hash_free(net->xfrm.state_bysrc, sz);
out_bysrc:
        xfrm_hash_free(net->xfrm.state_bydst, sz);
out_bydst:
        return -ENOMEM;
}

void xfrm_state_fini(struct net *net)
{
        unsigned int sz;

        flush_work(&net->xfrm.state_hash_work);
        flush_work(&xfrm_state_gc_work);
        xfrm_state_flush(net, 0, false, true);

        WARN_ON(!list_empty(&net->xfrm.state_all));

        sz = (net->xfrm.state_hmask + 1) * sizeof(struct hlist_head);
        WARN_ON(!hlist_empty(net->xfrm.state_byseq));
        xfrm_hash_free(net->xfrm.state_byseq, sz);
        WARN_ON(!hlist_empty(net->xfrm.state_byspi));
        xfrm_hash_free(net->xfrm.state_byspi, sz);
        WARN_ON(!hlist_empty(net->xfrm.state_bysrc));
        xfrm_hash_free(net->xfrm.state_bysrc, sz);
        WARN_ON(!hlist_empty(net->xfrm.state_bydst));
        xfrm_hash_free(net->xfrm.state_bydst, sz);
        free_percpu(net->xfrm.state_cache_input);
}

#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_helper_sainfo(struct xfrm_state *x,
                                     struct audit_buffer *audit_buf)
{
        struct xfrm_sec_ctx *ctx = x->security;
        u32 spi = ntohl(x->id.spi);

        if (ctx)
                audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
                                 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

        switch (x->props.family) {
        case AF_INET:
                audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
                                 &x->props.saddr.a4, &x->id.daddr.a4);
                break;
        case AF_INET6:
                audit_log_format(audit_buf, " src=%pI6 dst=%pI6",
                                 x->props.saddr.a6, x->id.daddr.a6);
                break;
        }

        audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
}

static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
                                      struct audit_buffer *audit_buf)
{
        const struct iphdr *iph4;
        const struct ipv6hdr *iph6;

        switch (family) {
        case AF_INET:
                iph4 = ip_hdr(skb);
                audit_log_format(audit_buf, " src=%pI4 dst=%pI4",
                                 &iph4->saddr, &iph4->daddr);
                break;
        case AF_INET6:
                iph6 = ipv6_hdr(skb);
                audit_log_format(audit_buf,
                                 " src=%pI6 dst=%pI6 flowlbl=0x%x%02x%02x",
                                 &iph6->saddr, &iph6->daddr,
                                 iph6->flow_lbl[0] & 0x0f,
                                 iph6->flow_lbl[1],
                                 iph6->flow_lbl[2]);
                break;
        }
}

void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid)
{
        struct audit_buffer *audit_buf;

        audit_buf = xfrm_audit_start("SAD-add");
        if (audit_buf == NULL)
                return;
        xfrm_audit_helper_usrinfo(task_valid, audit_buf);
        xfrm_audit_helper_sainfo(x, audit_buf);
        audit_log_format(audit_buf, " res=%u", result);
        audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_add);

void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid)
{
        struct audit_buffer *audit_buf;

        audit_buf = xfrm_audit_start("SAD-delete");
        if (audit_buf == NULL)
                return;
        xfrm_audit_helper_usrinfo(task_valid, audit_buf);
        xfrm_audit_helper_sainfo(x, audit_buf);
        audit_log_format(audit_buf, " res=%u", result);
        audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_delete);

void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
                                      struct sk_buff *skb)
{
        struct audit_buffer *audit_buf;
        u32 spi;

        audit_buf = xfrm_audit_start("SA-replay-overflow");
        if (audit_buf == NULL)
                return;
        xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
        /* don't record the sequence number because it's inherent in this kind
         * of audit message */
        spi = ntohl(x->id.spi);
        audit_log_format(audit_buf, " spi=%u(0x%x)", spi, spi);
        audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_replay_overflow);

void xfrm_audit_state_replay(struct xfrm_state *x,
                             struct sk_buff *skb, __be32 net_seq)
{
        struct audit_buffer *audit_buf;
        u32 spi;

        audit_buf = xfrm_audit_start("SA-replayed-pkt");
        if (audit_buf == NULL)
                return;
        xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
        spi = ntohl(x->id.spi);
        audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
                         spi, spi, ntohl(net_seq));
        audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_replay);

void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family)
{
        struct audit_buffer *audit_buf;

        audit_buf = xfrm_audit_start("SA-notfound");
        if (audit_buf == NULL)
                return;
        xfrm_audit_helper_pktinfo(skb, family, audit_buf);
        audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound_simple);

void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
                               __be32 net_spi, __be32 net_seq)
{
        struct audit_buffer *audit_buf;
        u32 spi;

        audit_buf = xfrm_audit_start("SA-notfound");
        if (audit_buf == NULL)
                return;
        xfrm_audit_helper_pktinfo(skb, family, audit_buf);
        spi = ntohl(net_spi);
        audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
                         spi, spi, ntohl(net_seq));
        audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_notfound);

void xfrm_audit_state_icvfail(struct xfrm_state *x,
                              struct sk_buff *skb, u8 proto)
{
        struct audit_buffer *audit_buf;
        __be32 net_spi;
        __be32 net_seq;

        audit_buf = xfrm_audit_start("SA-icv-failure");
        if (audit_buf == NULL)
                return;
        xfrm_audit_helper_pktinfo(skb, x->props.family, audit_buf);
        if (xfrm_parse_spi(skb, proto, &net_spi, &net_seq) == 0) {
                u32 spi = ntohl(net_spi);
                audit_log_format(audit_buf, " spi=%u(0x%x) seqno=%u",
                                 spi, spi, ntohl(net_seq));
        }
        audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_state_icvfail);
#endif /* CONFIG_AUDITSYSCALL */