1 // SPDX-License-Identifier: GPL-2.0-only
2 /* xfrm_user.c: User interface to configure xfrm engine.
3 *
4 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
5 *
6 * Changes:
7 * Mitsuru KANDA @USAGI
8 * Kazunori MIYAZAWA @USAGI
9 * Kunihiro Ishiguro <kunihiro@ipinfusion.com>
10 * IPv6 support
11 *
12 */
13
14 #include <linux/compat.h>
15 #include <linux/crypto.h>
16 #include <linux/module.h>
17 #include <linux/kernel.h>
18 #include <linux/types.h>
19 #include <linux/slab.h>
20 #include <linux/socket.h>
21 #include <linux/string.h>
22 #include <linux/net.h>
23 #include <linux/skbuff.h>
24 #include <linux/pfkeyv2.h>
25 #include <linux/ipsec.h>
26 #include <linux/init.h>
27 #include <linux/security.h>
28 #include <net/sock.h>
29 #include <net/xfrm.h>
30 #include <net/netlink.h>
31 #include <net/ah.h>
32 #include <linux/uaccess.h>
33 #if IS_ENABLED(CONFIG_IPV6)
34 #include <linux/in6.h>
35 #endif
36 #include <linux/unaligned.h>
37
verify_one_alg(struct nlattr ** attrs,enum xfrm_attr_type_t type,struct netlink_ext_ack * extack)38 static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type,
39 struct netlink_ext_ack *extack)
40 {
41 struct nlattr *rt = attrs[type];
42 struct xfrm_algo *algp;
43
44 if (!rt)
45 return 0;
46
47 algp = nla_data(rt);
48 if (nla_len(rt) < (int)xfrm_alg_len(algp)) {
49 NL_SET_ERR_MSG(extack, "Invalid AUTH/CRYPT/COMP attribute length");
50 return -EINVAL;
51 }
52
53 switch (type) {
54 case XFRMA_ALG_AUTH:
55 case XFRMA_ALG_CRYPT:
56 case XFRMA_ALG_COMP:
57 break;
58
59 default:
60 NL_SET_ERR_MSG(extack, "Invalid algorithm attribute type");
61 return -EINVAL;
62 }
63
64 algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
65 return 0;
66 }
67
verify_auth_trunc(struct nlattr ** attrs,struct netlink_ext_ack * extack)68 static int verify_auth_trunc(struct nlattr **attrs,
69 struct netlink_ext_ack *extack)
70 {
71 struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC];
72 struct xfrm_algo_auth *algp;
73
74 if (!rt)
75 return 0;
76
77 algp = nla_data(rt);
78 if (nla_len(rt) < (int)xfrm_alg_auth_len(algp)) {
79 NL_SET_ERR_MSG(extack, "Invalid AUTH_TRUNC attribute length");
80 return -EINVAL;
81 }
82
83 algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
84 return 0;
85 }
86
verify_aead(struct nlattr ** attrs,struct netlink_ext_ack * extack)87 static int verify_aead(struct nlattr **attrs, struct netlink_ext_ack *extack)
88 {
89 struct nlattr *rt = attrs[XFRMA_ALG_AEAD];
90 struct xfrm_algo_aead *algp;
91
92 if (!rt)
93 return 0;
94
95 algp = nla_data(rt);
96 if (nla_len(rt) < (int)aead_len(algp)) {
97 NL_SET_ERR_MSG(extack, "Invalid AEAD attribute length");
98 return -EINVAL;
99 }
100
101 algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
102 return 0;
103 }
104
/* If the given address attribute is present, point *addrp at its payload. */
static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type,
			    xfrm_address_t **addrp)
{
	struct nlattr *nla = attrs[type];

	if (!nla || !addrp)
		return;

	*addrp = nla_data(nla);
}
113
verify_sec_ctx_len(struct nlattr ** attrs,struct netlink_ext_ack * extack)114 static inline int verify_sec_ctx_len(struct nlattr **attrs, struct netlink_ext_ack *extack)
115 {
116 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
117 struct xfrm_user_sec_ctx *uctx;
118
119 if (!rt)
120 return 0;
121
122 uctx = nla_data(rt);
123 if (uctx->len > nla_len(rt) ||
124 uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len)) {
125 NL_SET_ERR_MSG(extack, "Invalid security context length");
126 return -EINVAL;
127 }
128
129 return 0;
130 }
131
/* Validate the XFRMA_REPLAY_ESN_VAL attribute for a new SA.
 *
 * Checks that the attribute is present when the ESN flag is set, that the
 * bitmap length and attribute size are self-consistent, that ESN is only
 * used with ESP/AH and without the legacy replay window, and that the
 * replay counters are consistent with the SA direction (if one was given).
 *
 * Returns 0 on success or -EINVAL with an extack message on failure.
 */
static inline int verify_replay(struct xfrm_usersa_info *p,
				struct nlattr **attrs, u8 sa_dir,
				struct netlink_ext_ack *extack)
{
	struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
	struct xfrm_replay_state_esn *rs;

	if (!rt) {
		/* ESN mode requires the ESN replay state attribute. */
		if (p->flags & XFRM_STATE_ESN) {
			NL_SET_ERR_MSG(extack, "Missing required attribute for ESN");
			return -EINVAL;
		}
		return 0;
	}

	rs = nla_data(rt);

	/* Bound the bitmap so the length computation below cannot overflow. */
	if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8) {
		NL_SET_ERR_MSG(extack, "ESN bitmap length must be <= 128");
		return -EINVAL;
	}

	/* Either the full bitmap is present, or only the fixed-size header. */
	if (nla_len(rt) < (int)xfrm_replay_state_esn_len(rs) &&
	    nla_len(rt) != sizeof(*rs)) {
		NL_SET_ERR_MSG(extack, "ESN attribute is too short to fit the full bitmap length");
		return -EINVAL;
	}

	/* As only ESP and AH support ESN feature. */
	if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH)) {
		NL_SET_ERR_MSG(extack, "ESN only supported for ESP and AH");
		return -EINVAL;
	}

	/* ESN supersedes the legacy replay window; both cannot be set. */
	if (p->replay_window != 0) {
		NL_SET_ERR_MSG(extack, "ESN not compatible with legacy replay_window");
		return -EINVAL;
	}

	if (sa_dir == XFRM_SA_DIR_OUT) {
		/* An output SA only advances oseq/oseq_hi; the inbound
		 * tracking fields must be unset.
		 */
		if (rs->replay_window) {
			NL_SET_ERR_MSG(extack, "Replay window should be 0 for output SA");
			return -EINVAL;
		}
		if (rs->seq || rs->seq_hi) {
			NL_SET_ERR_MSG(extack,
				       "Replay seq and seq_hi should be 0 for output SA");
			return -EINVAL;
		}

		if (!(p->flags & XFRM_STATE_ESN)) {
			if (rs->oseq_hi) {
				NL_SET_ERR_MSG(
					extack,
					"Replay oseq_hi should be 0 in non-ESN mode for output SA");
				return -EINVAL;
			}
			/* Reject a counter that would wrap on first use. */
			if (rs->oseq == U32_MAX) {
				NL_SET_ERR_MSG(
					extack,
					"Replay oseq should be less than 0xFFFFFFFF in non-ESN mode for output SA");
				return -EINVAL;
			}
		} else {
			if (rs->oseq == U32_MAX && rs->oseq_hi == U32_MAX) {
				NL_SET_ERR_MSG(
					extack,
					"Replay oseq and oseq_hi should be less than 0xFFFFFFFF for output SA");
				return -EINVAL;
			}
		}
		if (rs->bmp_len) {
			/* Message grammar fixed: "should 0" -> "should be 0". */
			NL_SET_ERR_MSG(extack, "Replay bmp_len should be 0 for output SA");
			return -EINVAL;
		}
	}

	if (sa_dir == XFRM_SA_DIR_IN) {
		/* Mirror image of the output checks: the outbound counters
		 * must be unset on an input SA.
		 */
		if (rs->oseq || rs->oseq_hi) {
			NL_SET_ERR_MSG(extack,
				       "Replay oseq and oseq_hi should be 0 for input SA");
			return -EINVAL;
		}
		if (!(p->flags & XFRM_STATE_ESN)) {
			if (rs->seq_hi) {
				NL_SET_ERR_MSG(
					extack,
					"Replay seq_hi should be 0 in non-ESN mode for input SA");
				return -EINVAL;
			}

			if (rs->seq == U32_MAX) {
				NL_SET_ERR_MSG(
					extack,
					"Replay seq should be less than 0xFFFFFFFF in non-ESN mode for input SA");
				return -EINVAL;
			}
		} else {
			if (rs->seq == U32_MAX && rs->seq_hi == U32_MAX) {
				NL_SET_ERR_MSG(
					extack,
					"Replay seq and seq_hi should be less than 0xFFFFFFFF for input SA");
				return -EINVAL;
			}
		}
	}

	return 0;
}
241
/* Validate a XFRM_MSG_NEWSA/UPDSA request before any state is allocated.
 *
 * Checks, in order: address family support, selector prefix lengths,
 * protocol-specific attribute requirements, per-attribute payload lengths,
 * replay/ESN consistency, mode validity, and direction-dependent
 * restrictions. Returns 0 on success or a negative errno with an extack
 * message describing the first failure.
 */
static int verify_newsa_info(struct xfrm_usersa_info *p,
			     struct nlattr **attrs,
			     struct netlink_ext_ack *extack)
{
	int err;
	/* Direction is optional; 0 means "not specified". */
	u8 sa_dir = nla_get_u8_default(attrs[XFRMA_SA_DIR], 0);
	u16 family = p->sel.family;

	err = -EINVAL;
	switch (p->family) {
	case AF_INET:
		break;

	case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
		break;
#else
		err = -EAFNOSUPPORT;
		NL_SET_ERR_MSG(extack, "IPv6 support disabled");
		goto out;
#endif

	default:
		NL_SET_ERR_MSG(extack, "Invalid address family");
		goto out;
	}

	/* An unset selector family defaults to the SA family unless the
	 * caller explicitly asked for an AF_UNSPEC selector. */
	if (!family && !(p->flags & XFRM_STATE_AF_UNSPEC))
		family = p->family;

	switch (family) {
	case AF_UNSPEC:
		break;

	case AF_INET:
		if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32) {
			NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 32 for IPv4)");
			goto out;
		}

		break;

	case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
		if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128) {
			NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 128 for IPv6)");
			goto out;
		}

		break;
#else
		NL_SET_ERR_MSG(extack, "IPv6 support disabled");
		err = -EAFNOSUPPORT;
		goto out;
#endif

	default:
		NL_SET_ERR_MSG(extack, "Invalid address family in selector");
		goto out;
	}

	err = -EINVAL;
	/* Per-protocol attribute requirements: each protocol has a set of
	 * mandatory algorithm attributes and a set of forbidden ones. */
	switch (p->id.proto) {
	case IPPROTO_AH:
		if (!attrs[XFRMA_ALG_AUTH] &&
		    !attrs[XFRMA_ALG_AUTH_TRUNC]) {
			NL_SET_ERR_MSG(extack, "Missing required attribute for AH: AUTH_TRUNC or AUTH");
			goto out;
		}

		if (attrs[XFRMA_ALG_AEAD] ||
		    attrs[XFRMA_ALG_CRYPT] ||
		    attrs[XFRMA_ALG_COMP] ||
		    attrs[XFRMA_TFCPAD]) {
			NL_SET_ERR_MSG(extack, "Invalid attributes for AH: AEAD, CRYPT, COMP, TFCPAD");
			goto out;
		}
		break;

	case IPPROTO_ESP:
		if (attrs[XFRMA_ALG_COMP]) {
			NL_SET_ERR_MSG(extack, "Invalid attribute for ESP: COMP");
			goto out;
		}

		if (!attrs[XFRMA_ALG_AUTH] &&
		    !attrs[XFRMA_ALG_AUTH_TRUNC] &&
		    !attrs[XFRMA_ALG_CRYPT] &&
		    !attrs[XFRMA_ALG_AEAD]) {
			NL_SET_ERR_MSG(extack, "Missing required attribute for ESP: at least one of AUTH, AUTH_TRUNC, CRYPT, AEAD");
			goto out;
		}

		/* AEAD is combined-mode; separate auth/crypt make no sense. */
		if ((attrs[XFRMA_ALG_AUTH] ||
		     attrs[XFRMA_ALG_AUTH_TRUNC] ||
		     attrs[XFRMA_ALG_CRYPT]) &&
		    attrs[XFRMA_ALG_AEAD]) {
			NL_SET_ERR_MSG(extack, "Invalid attribute combination for ESP: AEAD can't be used with AUTH, AUTH_TRUNC, CRYPT");
			goto out;
		}

		if (attrs[XFRMA_TFCPAD] &&
		    p->mode != XFRM_MODE_TUNNEL) {
			NL_SET_ERR_MSG(extack, "TFC padding can only be used in tunnel mode");
			goto out;
		}
		/* IP-TFS tuning attributes are only meaningful in IP-TFS mode. */
		if ((attrs[XFRMA_IPTFS_DROP_TIME] ||
		     attrs[XFRMA_IPTFS_REORDER_WINDOW] ||
		     attrs[XFRMA_IPTFS_DONT_FRAG] ||
		     attrs[XFRMA_IPTFS_INIT_DELAY] ||
		     attrs[XFRMA_IPTFS_MAX_QSIZE] ||
		     attrs[XFRMA_IPTFS_PKT_SIZE]) &&
		    p->mode != XFRM_MODE_IPTFS) {
			NL_SET_ERR_MSG(extack, "IP-TFS options can only be used in IP-TFS mode");
			goto out;
		}
		break;

	case IPPROTO_COMP:
		if (!attrs[XFRMA_ALG_COMP]) {
			NL_SET_ERR_MSG(extack, "Missing required attribute for COMP: COMP");
			goto out;
		}

		if (attrs[XFRMA_ALG_AEAD] ||
		    attrs[XFRMA_ALG_AUTH] ||
		    attrs[XFRMA_ALG_AUTH_TRUNC] ||
		    attrs[XFRMA_ALG_CRYPT] ||
		    attrs[XFRMA_TFCPAD]) {
			NL_SET_ERR_MSG(extack, "Invalid attributes for COMP: AEAD, AUTH, AUTH_TRUNC, CRYPT, TFCPAD");
			goto out;
		}

		/* IPcomp CPI is 16 bits. */
		if (ntohl(p->id.spi) >= 0x10000) {
			NL_SET_ERR_MSG(extack, "SPI is too large for COMP (must be < 0x10000)");
			goto out;
		}
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case IPPROTO_DSTOPTS:
	case IPPROTO_ROUTING:
		/* Mobile IPv6 route optimization states carry no crypto;
		 * only the care-of address is meaningful. */
		if (attrs[XFRMA_ALG_COMP] ||
		    attrs[XFRMA_ALG_AUTH] ||
		    attrs[XFRMA_ALG_AUTH_TRUNC] ||
		    attrs[XFRMA_ALG_AEAD] ||
		    attrs[XFRMA_ALG_CRYPT] ||
		    attrs[XFRMA_ENCAP] ||
		    attrs[XFRMA_SEC_CTX] ||
		    attrs[XFRMA_TFCPAD]) {
			NL_SET_ERR_MSG(extack, "Invalid attributes for DSTOPTS/ROUTING");
			goto out;
		}

		if (!attrs[XFRMA_COADDR]) {
			NL_SET_ERR_MSG(extack, "Missing required COADDR attribute for DSTOPTS/ROUTING");
			goto out;
		}
		break;
#endif

	default:
		NL_SET_ERR_MSG(extack, "Unsupported protocol");
		goto out;
	}

	/* Per-attribute payload validation. */
	if ((err = verify_aead(attrs, extack)))
		goto out;
	if ((err = verify_auth_trunc(attrs, extack)))
		goto out;
	if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH, extack)))
		goto out;
	if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT, extack)))
		goto out;
	if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP, extack)))
		goto out;
	if ((err = verify_sec_ctx_len(attrs, extack)))
		goto out;
	if ((err = verify_replay(p, attrs, sa_dir, extack)))
		goto out;

	err = -EINVAL;
	switch (p->mode) {
	case XFRM_MODE_TRANSPORT:
	case XFRM_MODE_TUNNEL:
	case XFRM_MODE_ROUTEOPTIMIZATION:
	case XFRM_MODE_BEET:
		break;
	case XFRM_MODE_IPTFS:
		if (p->id.proto != IPPROTO_ESP) {
			NL_SET_ERR_MSG(extack, "IP-TFS mode only supported with ESP");
			goto out;
		}
		if (sa_dir == 0) {
			NL_SET_ERR_MSG(extack, "IP-TFS mode requires in or out direction attribute");
			goto out;
		}
		break;

	default:
		NL_SET_ERR_MSG(extack, "Unsupported mode");
		goto out;
	}

	err = 0;

	if (attrs[XFRMA_MTIMER_THRESH]) {
		if (!attrs[XFRMA_ENCAP]) {
			NL_SET_ERR_MSG(extack, "MTIMER_THRESH attribute can only be set on ENCAP states");
			err = -EINVAL;
			goto out;
		}

		if (sa_dir == XFRM_SA_DIR_OUT) {
			NL_SET_ERR_MSG(extack,
				       "MTIMER_THRESH attribute should not be set on output SA");
			err = -EINVAL;
			goto out;
		}
	}

	/* Direction-specific restrictions: attributes and flags that only
	 * make sense for the opposite direction are rejected. */
	if (sa_dir == XFRM_SA_DIR_OUT) {
		if (p->flags & XFRM_STATE_DECAP_DSCP) {
			NL_SET_ERR_MSG(extack, "Flag DECAP_DSCP should not be set for output SA");
			err = -EINVAL;
			goto out;
		}

		if (p->flags & XFRM_STATE_ICMP) {
			NL_SET_ERR_MSG(extack, "Flag ICMP should not be set for output SA");
			err = -EINVAL;
			goto out;
		}

		if (p->flags & XFRM_STATE_WILDRECV) {
			NL_SET_ERR_MSG(extack, "Flag WILDRECV should not be set for output SA");
			err = -EINVAL;
			goto out;
		}

		if (p->replay_window) {
			NL_SET_ERR_MSG(extack, "Replay window should be 0 for output SA");
			err = -EINVAL;
			goto out;
		}

		if (attrs[XFRMA_IPTFS_DROP_TIME]) {
			NL_SET_ERR_MSG(extack, "IP-TFS drop time should not be set for output SA");
			err = -EINVAL;
			goto out;
		}

		if (attrs[XFRMA_IPTFS_REORDER_WINDOW]) {
			NL_SET_ERR_MSG(extack, "IP-TFS reorder window should not be set for output SA");
			err = -EINVAL;
			goto out;
		}

		if (attrs[XFRMA_REPLAY_VAL]) {
			struct xfrm_replay_state *replay;

			replay = nla_data(attrs[XFRMA_REPLAY_VAL]);

			if (replay->seq || replay->bitmap) {
				NL_SET_ERR_MSG(extack,
					       "Replay seq and bitmap should be 0 for output SA");
				err = -EINVAL;
				goto out;
			}
		}
	}

	if (sa_dir == XFRM_SA_DIR_IN) {
		if (p->flags & XFRM_STATE_NOPMTUDISC) {
			NL_SET_ERR_MSG(extack, "Flag NOPMTUDISC should not be set for input SA");
			err = -EINVAL;
			goto out;
		}

		if (attrs[XFRMA_SA_EXTRA_FLAGS]) {
			u32 xflags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]);

			if (xflags & XFRM_SA_XFLAG_DONT_ENCAP_DSCP) {
				NL_SET_ERR_MSG(extack, "Flag DONT_ENCAP_DSCP should not be set for input SA");
				err = -EINVAL;
				goto out;
			}

			if (xflags & XFRM_SA_XFLAG_OSEQ_MAY_WRAP) {
				NL_SET_ERR_MSG(extack, "Flag OSEQ_MAY_WRAP should not be set for input SA");
				err = -EINVAL;
				goto out;
			}

		}

		if (attrs[XFRMA_IPTFS_DONT_FRAG]) {
			NL_SET_ERR_MSG(extack, "IP-TFS don't fragment should not be set for input SA");
			err = -EINVAL;
			goto out;
		}

		if (attrs[XFRMA_IPTFS_INIT_DELAY]) {
			NL_SET_ERR_MSG(extack, "IP-TFS initial delay should not be set for input SA");
			err = -EINVAL;
			goto out;
		}

		if (attrs[XFRMA_IPTFS_MAX_QSIZE]) {
			NL_SET_ERR_MSG(extack, "IP-TFS max queue size should not be set for input SA");
			err = -EINVAL;
			goto out;
		}

		if (attrs[XFRMA_IPTFS_PKT_SIZE]) {
			NL_SET_ERR_MSG(extack, "IP-TFS packet size should not be set for input SA");
			err = -EINVAL;
			goto out;
		}
	}

	if (!sa_dir && attrs[XFRMA_SA_PCPU]) {
		NL_SET_ERR_MSG(extack, "SA_PCPU only supported with SA_DIR");
		err = -EINVAL;
		goto out;
	}

out:
	return err;
}
572
/* Resolve a generic algorithm attribute (used for COMP), record its
 * pfkey algorithm id in *props and attach a kernel copy to *algpp.
 * The caller owns the allocation on success.
 */
static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
			   struct xfrm_algo_desc *(*get_byname)(const char *, int),
			   struct nlattr *rta, struct netlink_ext_ack *extack)
{
	struct xfrm_algo_desc *desc;
	struct xfrm_algo *from_user;
	struct xfrm_algo *copy;

	if (rta == NULL)
		return 0;

	from_user = nla_data(rta);

	desc = get_byname(from_user->alg_name, 1);
	if (desc == NULL) {
		NL_SET_ERR_MSG(extack, "Requested COMP algorithm not found");
		return -ENOSYS;
	}
	*props = desc->desc.sadb_alg_id;

	copy = kmemdup(from_user, xfrm_alg_len(from_user), GFP_KERNEL);
	if (copy == NULL)
		return -ENOMEM;

	/* Canonicalize the name to the kernel's spelling. */
	strscpy(copy->alg_name, desc->name);
	*algpp = copy;
	return 0;
}
600
attach_crypt(struct xfrm_state * x,struct nlattr * rta,struct netlink_ext_ack * extack)601 static int attach_crypt(struct xfrm_state *x, struct nlattr *rta,
602 struct netlink_ext_ack *extack)
603 {
604 struct xfrm_algo *p, *ualg;
605 struct xfrm_algo_desc *algo;
606
607 if (!rta)
608 return 0;
609
610 ualg = nla_data(rta);
611
612 algo = xfrm_ealg_get_byname(ualg->alg_name, 1);
613 if (!algo) {
614 NL_SET_ERR_MSG(extack, "Requested CRYPT algorithm not found");
615 return -ENOSYS;
616 }
617 x->props.ealgo = algo->desc.sadb_alg_id;
618
619 p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
620 if (!p)
621 return -ENOMEM;
622
623 strscpy(p->alg_name, algo->name);
624 x->ealg = p;
625 x->geniv = algo->uinfo.encr.geniv;
626 return 0;
627 }
628
/* Resolve the legacy AUTH attribute (struct xfrm_algo, no truncation
 * length) and build an xfrm_algo_auth with the algorithm's default ICV
 * truncation. Records the pfkey id in *props.
 */
static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props,
		       struct nlattr *rta, struct netlink_ext_ack *extack)
{
	struct xfrm_algo_desc *desc;
	struct xfrm_algo *from_user;
	struct xfrm_algo_auth *copy;
	size_t key_bytes;

	if (rta == NULL)
		return 0;

	from_user = nla_data(rta);

	desc = xfrm_aalg_get_byname(from_user->alg_name, 1);
	if (desc == NULL) {
		NL_SET_ERR_MSG(extack, "Requested AUTH algorithm not found");
		return -ENOSYS;
	}
	*props = desc->desc.sadb_alg_id;

	/* Key length is in bits; round up to whole bytes. */
	key_bytes = (from_user->alg_key_len + 7) / 8;
	copy = kmalloc(sizeof(*copy) + key_bytes, GFP_KERNEL);
	if (copy == NULL)
		return -ENOMEM;

	strscpy(copy->alg_name, desc->name);
	copy->alg_key_len = from_user->alg_key_len;
	copy->alg_trunc_len = desc->uinfo.auth.icv_truncbits;
	memcpy(copy->alg_key, from_user->alg_key, key_bytes);

	*algpp = copy;
	return 0;
}
660
/* Resolve the AUTH_TRUNC attribute, validate the requested ICV truncation
 * length against the algorithm's full ICV size, and attach a kernel copy.
 * A zero truncation length falls back to the algorithm default.
 */
static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
			     struct nlattr *rta, struct netlink_ext_ack *extack)
{
	struct xfrm_algo_desc *desc;
	struct xfrm_algo_auth *from_user;
	struct xfrm_algo_auth *copy;

	if (rta == NULL)
		return 0;

	from_user = nla_data(rta);

	desc = xfrm_aalg_get_byname(from_user->alg_name, 1);
	if (desc == NULL) {
		NL_SET_ERR_MSG(extack, "Requested AUTH_TRUNC algorithm not found");
		return -ENOSYS;
	}
	if (from_user->alg_trunc_len > desc->uinfo.auth.icv_fullbits) {
		NL_SET_ERR_MSG(extack, "Invalid length requested for truncated ICV");
		return -EINVAL;
	}
	*props = desc->desc.sadb_alg_id;

	copy = kmemdup(from_user, xfrm_alg_auth_len(from_user), GFP_KERNEL);
	if (copy == NULL)
		return -ENOMEM;

	strscpy(copy->alg_name, desc->name);
	if (copy->alg_trunc_len == 0)
		copy->alg_trunc_len = desc->uinfo.auth.icv_truncbits;

	*algpp = copy;
	return 0;
}
694
attach_aead(struct xfrm_state * x,struct nlattr * rta,struct netlink_ext_ack * extack)695 static int attach_aead(struct xfrm_state *x, struct nlattr *rta,
696 struct netlink_ext_ack *extack)
697 {
698 struct xfrm_algo_aead *p, *ualg;
699 struct xfrm_algo_desc *algo;
700
701 if (!rta)
702 return 0;
703
704 ualg = nla_data(rta);
705
706 algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1);
707 if (!algo) {
708 NL_SET_ERR_MSG(extack, "Requested AEAD algorithm not found");
709 return -ENOSYS;
710 }
711 x->props.ealgo = algo->desc.sadb_alg_id;
712
713 p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL);
714 if (!p)
715 return -ENOMEM;
716
717 strscpy(p->alg_name, algo->name);
718 x->aead = p;
719 x->geniv = algo->uinfo.aead.geniv;
720 return 0;
721 }
722
/* Validate an ESN replay-state update against the existing SA's ESN state.
 *
 * Ensures the user-supplied xfrm_replay_state_esn fits its attribute, has
 * the same total size and bitmap length as the SA's current ESN state, and
 * that its replay window does not exceed the bitmap capacity.
 *
 * Returns 0 when no update is requested or all checks pass, -EINVAL with
 * an extack message otherwise.
 */
static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn,
					 struct nlattr *rp,
					 struct netlink_ext_ack *extack)
{
	struct xfrm_replay_state_esn *up;
	unsigned int ulen;

	/* Nothing to check unless the SA uses ESN and an update was sent. */
	if (!replay_esn || !rp)
		return 0;

	up = nla_data(rp);
	ulen = xfrm_replay_state_esn_len(up);

	/* Check the overall length and the internal bitmap length to avoid
	 * potential overflow. */
	if (nla_len(rp) < (int)ulen) {
		NL_SET_ERR_MSG(extack, "ESN attribute is too short");
		return -EINVAL;
	}

	if (xfrm_replay_state_esn_len(replay_esn) != ulen) {
		NL_SET_ERR_MSG(extack, "New ESN size doesn't match the existing SA's ESN size");
		return -EINVAL;
	}

	if (replay_esn->bmp_len != up->bmp_len) {
		NL_SET_ERR_MSG(extack, "New ESN bitmap size doesn't match the existing SA's ESN bitmap");
		return -EINVAL;
	}

	/* The window cannot cover more bits than the bitmap holds. */
	if (up->replay_window > up->bmp_len * sizeof(__u32) * 8) {
		NL_SET_ERR_MSG(extack, "ESN replay window is longer than the bitmap");
		return -EINVAL;
	}

	return 0;
}
760
/* Allocate the SA's replay_esn and preplay_esn from the user-supplied
 * XFRMA_REPLAY_ESN_VAL attribute.
 *
 * Both allocations are sized for the full bitmap (klen). If the attribute
 * carried only the fixed-size header and not the bitmap, only that header
 * (sizeof(*up)) is copied; the rest stays zeroed from kzalloc.
 *
 * Returns 0 on success (or when no attribute was given), -ENOMEM on
 * allocation failure.
 */
static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn,
				       struct xfrm_replay_state_esn **preplay_esn,
				       struct nlattr *rta)
{
	struct xfrm_replay_state_esn *p, *pp, *up;
	unsigned int klen, ulen;

	if (!rta)
		return 0;

	up = nla_data(rta);
	klen = xfrm_replay_state_esn_len(up);
	/* Copy the full structure only if the attribute actually contains
	 * the bitmap; otherwise copy just the header. */
	ulen = nla_len(rta) >= (int)klen ? klen : sizeof(*up);

	p = kzalloc(klen, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	pp = kzalloc(klen, GFP_KERNEL);
	if (!pp) {
		kfree(p);
		return -ENOMEM;
	}

	memcpy(p, up, ulen);
	memcpy(pp, up, ulen);

	*replay_esn = p;
	*preplay_esn = pp;

	return 0;
}
793
xfrm_user_sec_ctx_size(struct xfrm_sec_ctx * xfrm_ctx)794 static inline unsigned int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
795 {
796 unsigned int len = 0;
797
798 if (xfrm_ctx) {
799 len += sizeof(struct xfrm_user_sec_ctx);
800 len += xfrm_ctx->ctx_len;
801 }
802 return len;
803 }
804
/* Populate a freshly allocated xfrm_state from the userspace
 * xfrm_usersa_info header (id, selector, lifetimes and properties).
 */
static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
	memcpy(&x->id, &p->id, sizeof(x->id));
	memcpy(&x->sel, &p->sel, sizeof(x->sel));
	memcpy(&x->lft, &p->lft, sizeof(x->lft));
	x->props.mode = p->mode;
	/* Clamp the legacy replay window to the size of the in-kernel bitmap. */
	x->props.replay_window = min_t(unsigned int, p->replay_window,
				       sizeof(x->replay.bitmap) * 8);
	x->props.reqid = p->reqid;
	x->props.family = p->family;
	memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
	x->props.flags = p->flags;

	/* Default the selector family to the SA family unless the caller
	 * explicitly requested an AF_UNSPEC selector. */
	if (!x->sel.family && !(p->flags & XFRM_STATE_AF_UNSPEC))
		x->sel.family = p->family;
}
821
822 /*
823 * someday when pfkey also has support, we could have the code
824 * somehow made shareable and move it to xfrm_state.c - JHS
825 *
826 */
/* Apply optional AE (aevent) attributes to an existing state: replay
 * state (legacy and, when update_esn is set, ESN), current lifetime and
 * the aevent timer/threshold tunables. Attributes that are absent leave
 * the corresponding state untouched.
 *
 * Callers must have validated attribute lengths beforehand (e.g. via
 * xfrm_replay_verify_len for the ESN attribute).
 */
static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
				  int update_esn)
{
	struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
	struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL;
	struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
	struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
	struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
	struct nlattr *mt = attrs[XFRMA_MTIMER_THRESH];

	/* ESN replay state: copied into both live and "pre" snapshots. */
	if (re && x->replay_esn && x->preplay_esn) {
		struct xfrm_replay_state_esn *replay_esn;
		replay_esn = nla_data(re);
		memcpy(x->replay_esn, replay_esn,
		       xfrm_replay_state_esn_len(replay_esn));
		memcpy(x->preplay_esn, replay_esn,
		       xfrm_replay_state_esn_len(replay_esn));
	}

	/* Legacy (non-ESN) replay state. */
	if (rp) {
		struct xfrm_replay_state *replay;
		replay = nla_data(rp);
		memcpy(&x->replay, replay, sizeof(*replay));
		memcpy(&x->preplay, replay, sizeof(*replay));
	}

	/* Current lifetime counters. */
	if (lt) {
		struct xfrm_lifetime_cur *ltime;
		ltime = nla_data(lt);
		x->curlft.bytes = ltime->bytes;
		x->curlft.packets = ltime->packets;
		x->curlft.add_time = ltime->add_time;
		x->curlft.use_time = ltime->use_time;
	}

	if (et)
		x->replay_maxage = nla_get_u32(et);

	if (rt)
		x->replay_maxdiff = nla_get_u32(rt);

	if (mt)
		x->mapping_maxage = nla_get_u32(mt);
}
871
xfrm_smark_init(struct nlattr ** attrs,struct xfrm_mark * m)872 static void xfrm_smark_init(struct nlattr **attrs, struct xfrm_mark *m)
873 {
874 if (attrs[XFRMA_SET_MARK]) {
875 m->v = nla_get_u32(attrs[XFRMA_SET_MARK]);
876 m->m = nla_get_u32_default(attrs[XFRMA_SET_MARK_MASK],
877 0xffffffff);
878 } else {
879 m->v = m->m = 0;
880 }
881 }
882
/* Build a new xfrm_state from a validated NEWSA/UPDSA request.
 *
 * Allocates the state, copies the fixed header, attaches all optional
 * attributes (encap, algorithms, marks, offload, ...), and initializes
 * replay handling. On success returns the state with its initial
 * reference; on failure returns NULL, stores the errno in *errp, and
 * releases the half-constructed state via the error path.
 */
static struct xfrm_state *xfrm_state_construct(struct net *net,
					       struct xfrm_usersa_info *p,
					       struct nlattr **attrs,
					       int *errp,
					       struct netlink_ext_ack *extack)
{
	struct xfrm_state *x = xfrm_state_alloc(net);
	int err = -ENOMEM;

	if (!x)
		goto error_no_put;

	copy_from_user_state(x, p);

	if (attrs[XFRMA_ENCAP]) {
		x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
				   sizeof(*x->encap), GFP_KERNEL);
		if (x->encap == NULL)
			goto error;
	}

	if (attrs[XFRMA_COADDR]) {
		x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
				    sizeof(*x->coaddr), GFP_KERNEL);
		if (x->coaddr == NULL)
			goto error;
	}

	if (attrs[XFRMA_SA_EXTRA_FLAGS])
		x->props.extra_flags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]);

	/* Attach algorithms. AUTH is only consulted when AUTH_TRUNC did
	 * not already set an auth algorithm (aalgo). */
	if ((err = attach_aead(x, attrs[XFRMA_ALG_AEAD], extack)))
		goto error;
	if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo,
				     attrs[XFRMA_ALG_AUTH_TRUNC], extack)))
		goto error;
	if (!x->props.aalgo) {
		if ((err = attach_auth(&x->aalg, &x->props.aalgo,
				       attrs[XFRMA_ALG_AUTH], extack)))
			goto error;
	}
	if ((err = attach_crypt(x, attrs[XFRMA_ALG_CRYPT], extack)))
		goto error;
	if ((err = attach_one_algo(&x->calg, &x->props.calgo,
				   xfrm_calg_get_byname,
				   attrs[XFRMA_ALG_COMP], extack)))
		goto error;

	if (attrs[XFRMA_TFCPAD])
		x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]);

	xfrm_mark_get(attrs, &x->mark);

	xfrm_smark_init(attrs, &x->props.smark);

	if (attrs[XFRMA_IF_ID])
		x->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	if (attrs[XFRMA_SA_DIR])
		x->dir = nla_get_u8(attrs[XFRMA_SA_DIR]);

	if (attrs[XFRMA_NAT_KEEPALIVE_INTERVAL])
		x->nat_keepalive_interval =
			nla_get_u32(attrs[XFRMA_NAT_KEEPALIVE_INTERVAL]);

	if (attrs[XFRMA_SA_PCPU]) {
		x->pcpu_num = nla_get_u32(attrs[XFRMA_SA_PCPU]);
		if (x->pcpu_num >= num_possible_cpus()) {
			err = -ERANGE;
			NL_SET_ERR_MSG(extack, "pCPU number too big");
			goto error;
		}
	}

	err = __xfrm_init_state(x, extack);
	if (err)
		goto error;

	if (attrs[XFRMA_SEC_CTX]) {
		err = security_xfrm_state_alloc(x,
						nla_data(attrs[XFRMA_SEC_CTX]));
		if (err)
			goto error;
	}

	if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
					       attrs[XFRMA_REPLAY_ESN_VAL])))
		goto error;

	x->km.seq = p->seq;
	x->replay_maxdiff = net->xfrm.sysctl_aevent_rseqth;
	/* sysctl_xfrm_aevent_etime is in 100ms units */
	x->replay_maxage = (net->xfrm.sysctl_aevent_etime*HZ)/XFRM_AE_ETH_M;

	if ((err = xfrm_init_replay(x, extack)))
		goto error;

	/* override default values from above */
	xfrm_update_ae_params(x, attrs, 0);

	xfrm_set_type_offload(x, attrs[XFRMA_OFFLOAD_DEV]);
	/* configure the hardware if offload is requested */
	if (attrs[XFRMA_OFFLOAD_DEV]) {
		err = xfrm_dev_state_add(net, x,
					 nla_data(attrs[XFRMA_OFFLOAD_DEV]),
					 extack);
		if (err)
			goto error;
	}

	/* Mode-specific (e.g. IP-TFS) user configuration hook. */
	if (x->mode_cbs && x->mode_cbs->user_init) {
		err = x->mode_cbs->user_init(net, x, attrs, extack);
		if (err)
			goto error;
	}

	return x;

error:
	/* Mark dead before dropping the last reference so the state is
	 * torn down rather than kept. */
	x->km.state = XFRM_STATE_DEAD;
	xfrm_state_put(x);
error_no_put:
	*errp = err;
	return NULL;
}
1008
/* Handle XFRM_MSG_NEWSA / XFRM_MSG_UPDSA: validate the request, construct
 * the state, insert (or update) it, audit the result and notify km
 * listeners on success.
 */
static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_usersa_info *p = nlmsg_data(nlh);
	struct xfrm_state *x;
	int err;
	struct km_event c;

	err = verify_newsa_info(p, attrs, extack);
	if (err)
		return err;

	x = xfrm_state_construct(net, p, attrs, &err, extack);
	if (!x)
		return err;

	/* Extra hold for this function; dropped at 'out'. */
	xfrm_state_hold(x);
	if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
		err = xfrm_state_add(x);
	else
		err = xfrm_state_update(x);

	xfrm_audit_state_add(x, err ? 0 : 1, true);

	if (err < 0) {
		/* Insertion failed: kill the state and drop the
		 * construction reference before releasing our hold. */
		x->km.state = XFRM_STATE_DEAD;
		xfrm_dev_state_delete(x);
		__xfrm_state_put(x);
		goto out;
	}

	if (x->km.state == XFRM_STATE_VOID)
		x->km.state = XFRM_STATE_VALID;

	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.event = nlh->nlmsg_type;

	km_state_notify(x, &c);
out:
	xfrm_state_put(x);
	return err;
}
1053
/* Look up an xfrm_state from a userspace xfrm_usersa_id.
 *
 * Protocols matched by xfrm_id_proto_match(..., IPSEC_PROTO_ANY) are
 * looked up by (daddr, spi, proto); others require an XFRMA_SRCADDR
 * attribute and are looked up by address pair. Returns a held state or
 * NULL, storing the errno in *errp when provided.
 */
static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
						 struct xfrm_usersa_id *p,
						 struct nlattr **attrs,
						 int *errp)
{
	struct xfrm_state *x = NULL;
	struct xfrm_mark m;
	int err;
	u32 mark = xfrm_mark_get(attrs, &m);

	if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
		err = -ESRCH;
		x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family);
	} else {
		xfrm_address_t *saddr = NULL;

		verify_one_addr(attrs, XFRMA_SRCADDR, &saddr);
		if (!saddr) {
			err = -EINVAL;
			goto out;
		}

		err = -ESRCH;
		x = xfrm_state_lookup_byaddr(net, mark,
					     &p->daddr, saddr,
					     p->proto, p->family);
	}

out:
	if (!x && errp)
		*errp = err;
	return x;
}
1087
/* Handle XFRM_MSG_DELSA: look up the state, enforce security policy and
 * kernel-ownership restrictions, delete it, audit the outcome (on all
 * paths) and notify km listeners on success.
 */
static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	int err = -ESRCH;
	struct km_event c;
	struct xfrm_usersa_id *p = nlmsg_data(nlh);

	x = xfrm_user_state_lookup(net, p, attrs, &err);
	if (x == NULL)
		return err;

	if ((err = security_xfrm_state_delete(x)) != 0)
		goto out;

	/* States created by the kernel (e.g. for tunnels) cannot be
	 * deleted from userspace. */
	if (xfrm_state_kern(x)) {
		NL_SET_ERR_MSG(extack, "SA is in use by tunnels");
		err = -EPERM;
		goto out;
	}

	err = xfrm_state_delete(x);
	if (err < 0)
		goto out;

	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.event = nlh->nlmsg_type;
	km_state_notify(x, &c);

out:
	/* Audit both successful and failed deletion attempts. */
	xfrm_audit_state_delete(x, err ? 0 : 1, true);
	xfrm_state_put(x);
	return err;
}
1124
/* Fill a userspace xfrm_usersa_info from an xfrm_state, refreshing
 * hardware-offload statistics first when the state is offloaded.
 */
static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
	memset(p, 0, sizeof(*p));
	memcpy(&p->id, &x->id, sizeof(p->id));
	memcpy(&p->sel, &x->sel, sizeof(p->sel));
	memcpy(&p->lft, &x->lft, sizeof(p->lft));
	if (x->xso.dev)
		xfrm_dev_state_update_stats(x);
	memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
	/* The stats fields in the uapi struct may be unaligned. */
	put_unaligned(x->stats.replay_window, &p->stats.replay_window);
	put_unaligned(x->stats.replay, &p->stats.replay);
	put_unaligned(x->stats.integrity_failed, &p->stats.integrity_failed);
	memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
	p->mode = x->props.mode;
	p->replay_window = x->props.replay_window;
	p->reqid = x->props.reqid;
	p->family = x->props.family;
	p->flags = x->props.flags;
	p->seq = x->km.seq;
}
1145
/* Context passed through state/policy dump walkers: couples the request
 * skb (source of the requester's portid) with the reply skb being built.
 */
struct xfrm_dump_info {
	struct sk_buff *in_skb;		/* request skb; NETLINK_CB().portid */
	struct sk_buff *out_skb;	/* reply skb under construction */
	u32 nlmsg_seq;			/* sequence number to echo back */
	u16 nlmsg_flags;		/* e.g. NLM_F_MULTI during dumps */
};
1152
copy_sec_ctx(struct xfrm_sec_ctx * s,struct sk_buff * skb)1153 static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
1154 {
1155 struct xfrm_user_sec_ctx *uctx;
1156 struct nlattr *attr;
1157 int ctx_size = sizeof(*uctx) + s->ctx_len;
1158
1159 attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size);
1160 if (attr == NULL)
1161 return -EMSGSIZE;
1162
1163 uctx = nla_data(attr);
1164 uctx->exttype = XFRMA_SEC_CTX;
1165 uctx->len = ctx_size;
1166 uctx->ctx_doi = s->ctx_doi;
1167 uctx->ctx_alg = s->ctx_alg;
1168 uctx->ctx_len = s->ctx_len;
1169 memcpy(uctx + 1, s->ctx_str, s->ctx_len);
1170
1171 return 0;
1172 }
1173
copy_user_offload(struct xfrm_dev_offload * xso,struct sk_buff * skb)1174 static int copy_user_offload(struct xfrm_dev_offload *xso, struct sk_buff *skb)
1175 {
1176 struct xfrm_user_offload *xuo;
1177 struct nlattr *attr;
1178
1179 attr = nla_reserve(skb, XFRMA_OFFLOAD_DEV, sizeof(*xuo));
1180 if (attr == NULL)
1181 return -EMSGSIZE;
1182
1183 xuo = nla_data(attr);
1184 memset(xuo, 0, sizeof(*xuo));
1185 xuo->ifindex = xso->dev->ifindex;
1186 if (xso->dir == XFRM_DEV_OFFLOAD_IN)
1187 xuo->flags = XFRM_OFFLOAD_INBOUND;
1188 if (xso->type == XFRM_DEV_OFFLOAD_PACKET)
1189 xuo->flags |= XFRM_OFFLOAD_PACKET;
1190
1191 return 0;
1192 }
1193
xfrm_redact(void)1194 static bool xfrm_redact(void)
1195 {
1196 return IS_ENABLED(CONFIG_SECURITY) &&
1197 security_locked_down(LOCKDOWN_XFRM_SECRET);
1198 }
1199
copy_to_user_auth(struct xfrm_algo_auth * auth,struct sk_buff * skb)1200 static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
1201 {
1202 struct xfrm_algo *algo;
1203 struct xfrm_algo_auth *ap;
1204 struct nlattr *nla;
1205 bool redact_secret = xfrm_redact();
1206
1207 nla = nla_reserve(skb, XFRMA_ALG_AUTH,
1208 sizeof(*algo) + (auth->alg_key_len + 7) / 8);
1209 if (!nla)
1210 return -EMSGSIZE;
1211 algo = nla_data(nla);
1212 strscpy_pad(algo->alg_name, auth->alg_name);
1213
1214 if (redact_secret && auth->alg_key_len)
1215 memset(algo->alg_key, 0, (auth->alg_key_len + 7) / 8);
1216 else
1217 memcpy(algo->alg_key, auth->alg_key,
1218 (auth->alg_key_len + 7) / 8);
1219 algo->alg_key_len = auth->alg_key_len;
1220
1221 nla = nla_reserve(skb, XFRMA_ALG_AUTH_TRUNC, xfrm_alg_auth_len(auth));
1222 if (!nla)
1223 return -EMSGSIZE;
1224 ap = nla_data(nla);
1225 strscpy_pad(ap->alg_name, auth->alg_name);
1226 ap->alg_key_len = auth->alg_key_len;
1227 ap->alg_trunc_len = auth->alg_trunc_len;
1228 if (redact_secret && auth->alg_key_len)
1229 memset(ap->alg_key, 0, (auth->alg_key_len + 7) / 8);
1230 else
1231 memcpy(ap->alg_key, auth->alg_key,
1232 (auth->alg_key_len + 7) / 8);
1233 return 0;
1234 }
1235
copy_to_user_aead(struct xfrm_algo_aead * aead,struct sk_buff * skb)1236 static int copy_to_user_aead(struct xfrm_algo_aead *aead, struct sk_buff *skb)
1237 {
1238 struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_AEAD, aead_len(aead));
1239 struct xfrm_algo_aead *ap;
1240 bool redact_secret = xfrm_redact();
1241
1242 if (!nla)
1243 return -EMSGSIZE;
1244
1245 ap = nla_data(nla);
1246 strscpy_pad(ap->alg_name, aead->alg_name);
1247 ap->alg_key_len = aead->alg_key_len;
1248 ap->alg_icv_len = aead->alg_icv_len;
1249
1250 if (redact_secret && aead->alg_key_len)
1251 memset(ap->alg_key, 0, (aead->alg_key_len + 7) / 8);
1252 else
1253 memcpy(ap->alg_key, aead->alg_key,
1254 (aead->alg_key_len + 7) / 8);
1255 return 0;
1256 }
1257
copy_to_user_ealg(struct xfrm_algo * ealg,struct sk_buff * skb)1258 static int copy_to_user_ealg(struct xfrm_algo *ealg, struct sk_buff *skb)
1259 {
1260 struct xfrm_algo *ap;
1261 bool redact_secret = xfrm_redact();
1262 struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_CRYPT,
1263 xfrm_alg_len(ealg));
1264 if (!nla)
1265 return -EMSGSIZE;
1266
1267 ap = nla_data(nla);
1268 strscpy_pad(ap->alg_name, ealg->alg_name);
1269 ap->alg_key_len = ealg->alg_key_len;
1270
1271 if (redact_secret && ealg->alg_key_len)
1272 memset(ap->alg_key, 0, (ealg->alg_key_len + 7) / 8);
1273 else
1274 memcpy(ap->alg_key, ealg->alg_key,
1275 (ealg->alg_key_len + 7) / 8);
1276
1277 return 0;
1278 }
1279
copy_to_user_calg(struct xfrm_algo * calg,struct sk_buff * skb)1280 static int copy_to_user_calg(struct xfrm_algo *calg, struct sk_buff *skb)
1281 {
1282 struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_COMP, sizeof(*calg));
1283 struct xfrm_algo *ap;
1284
1285 if (!nla)
1286 return -EMSGSIZE;
1287
1288 ap = nla_data(nla);
1289 strscpy_pad(ap->alg_name, calg->alg_name);
1290 ap->alg_key_len = 0;
1291
1292 return 0;
1293 }
1294
copy_to_user_encap(struct xfrm_encap_tmpl * ep,struct sk_buff * skb)1295 static int copy_to_user_encap(struct xfrm_encap_tmpl *ep, struct sk_buff *skb)
1296 {
1297 struct nlattr *nla = nla_reserve(skb, XFRMA_ENCAP, sizeof(*ep));
1298 struct xfrm_encap_tmpl *uep;
1299
1300 if (!nla)
1301 return -EMSGSIZE;
1302
1303 uep = nla_data(nla);
1304 memset(uep, 0, sizeof(*uep));
1305
1306 uep->encap_type = ep->encap_type;
1307 uep->encap_sport = ep->encap_sport;
1308 uep->encap_dport = ep->encap_dport;
1309 uep->encap_oa = ep->encap_oa;
1310
1311 return 0;
1312 }
1313
xfrm_smark_put(struct sk_buff * skb,struct xfrm_mark * m)1314 static int xfrm_smark_put(struct sk_buff *skb, struct xfrm_mark *m)
1315 {
1316 int ret = 0;
1317
1318 if (m->v | m->m) {
1319 ret = nla_put_u32(skb, XFRMA_SET_MARK, m->v);
1320 if (!ret)
1321 ret = nla_put_u32(skb, XFRMA_SET_MARK_MASK, m->m);
1322 }
1323 return ret;
1324 }
1325
/* Don't change this without updating xfrm_sa_len! */
/* Serialize the complete SA @x into @skb: the base xfrm_usersa_info
 * payload @p plus one netlink attribute per optional piece of state
 * that is present.  Returns 0 or a negative errno (usually -EMSGSIZE).
 */
static int copy_to_user_state_extra(struct xfrm_state *x,
				    struct xfrm_usersa_info *p,
				    struct sk_buff *skb)
{
	int ret = 0;

	copy_to_user_state(x, p);

	if (x->props.extra_flags) {
		ret = nla_put_u32(skb, XFRMA_SA_EXTRA_FLAGS,
				  x->props.extra_flags);
		if (ret)
			goto out;
	}

	if (x->coaddr) {
		ret = nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
		if (ret)
			goto out;
	}
	if (x->lastused) {
		ret = nla_put_u64_64bit(skb, XFRMA_LASTUSED, x->lastused,
					XFRMA_PAD);
		if (ret)
			goto out;
	}
	/* Algorithm attributes: AEAD, auth (legacy + trunc), crypt, comp. */
	if (x->aead) {
		ret = copy_to_user_aead(x->aead, skb);
		if (ret)
			goto out;
	}
	if (x->aalg) {
		ret = copy_to_user_auth(x->aalg, skb);
		if (ret)
			goto out;
	}
	if (x->ealg) {
		ret = copy_to_user_ealg(x->ealg, skb);
		if (ret)
			goto out;
	}
	if (x->calg) {
		ret = copy_to_user_calg(x->calg, skb);
		if (ret)
			goto out;
	}
	if (x->encap) {
		ret = copy_to_user_encap(x->encap, skb);
		if (ret)
			goto out;
	}
	if (x->tfcpad) {
		ret = nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad);
		if (ret)
			goto out;
	}
	ret = xfrm_mark_put(skb, &x->mark);
	if (ret)
		goto out;

	ret = xfrm_smark_put(skb, &x->props.smark);
	if (ret)
		goto out;

	/* Exactly one replay representation is emitted: ESN if configured,
	 * otherwise the legacy replay state.
	 */
	if (x->replay_esn)
		ret = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
			      xfrm_replay_state_esn_len(x->replay_esn),
			      x->replay_esn);
	else
		ret = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
			      &x->replay);
	if (ret)
		goto out;
	if(x->xso.dev)
		ret = copy_user_offload(&x->xso, skb);
	if (ret)
		goto out;
	if (x->if_id) {
		ret = nla_put_u32(skb, XFRMA_IF_ID, x->if_id);
		if (ret)
			goto out;
	}
	if (x->security) {
		ret = copy_sec_ctx(x->security, skb);
		if (ret)
			goto out;
	}
	/* Mode-specific (e.g. IPTFS) attributes, if the mode provides them. */
	if (x->mode_cbs && x->mode_cbs->copy_to_user)
		ret = x->mode_cbs->copy_to_user(x, skb);
	if (ret)
		goto out;
	if (x->mapping_maxage) {
		ret = nla_put_u32(skb, XFRMA_MTIMER_THRESH, x->mapping_maxage);
		if (ret)
			goto out;
	}
	if (x->pcpu_num != UINT_MAX) {
		ret = nla_put_u32(skb, XFRMA_SA_PCPU, x->pcpu_num);
		if (ret)
			goto out;
	}
	if (x->dir)
		ret = nla_put_u8(skb, XFRMA_SA_DIR, x->dir);

	if (x->nat_keepalive_interval) {
		ret = nla_put_u32(skb, XFRMA_NAT_KEEPALIVE_INTERVAL,
				  x->nat_keepalive_interval);
		if (ret)
			goto out;
	}
out:
	return ret;
}
1440
/* xfrm_state_walk() callback: append one XFRM_MSG_NEWSA message for @x
 * to the output skb.  On any failure the partially built message is
 * cancelled so the skb stays consistent.
 */
static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
{
	struct xfrm_dump_info *sp = ptr;
	struct sk_buff *in_skb = sp->in_skb;
	struct sk_buff *skb = sp->out_skb;
	struct xfrm_translator *xtr;
	struct xfrm_usersa_info *p;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
			XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	p = nlmsg_data(nlh);

	err = copy_to_user_state_extra(x, p, skb);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}
	nlmsg_end(skb, nlh);

	/* If a compat translator module is loaded, also build the 32-bit
	 * compat representation of this message.
	 */
	xtr = xfrm_get_translator();
	if (xtr) {
		err = xtr->alloc_compat(skb, nlh);

		xfrm_put_translator(xtr);
		if (err) {
			nlmsg_cancel(skb, nlh);
			return err;
		}
	}

	return 0;
}
1478
xfrm_dump_sa_done(struct netlink_callback * cb)1479 static int xfrm_dump_sa_done(struct netlink_callback *cb)
1480 {
1481 struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
1482 struct sock *sk = cb->skb->sk;
1483 struct net *net = sock_net(sk);
1484
1485 if (cb->args[0])
1486 xfrm_state_walk_done(walk, net);
1487 return 0;
1488 }
1489
/* Netlink dump handler for XFRM_MSG_GETSA: walks all SAs, emitting one
 * message each via dump_one_state().  The walk cursor lives inside
 * cb->args[1..]; cb->args[0] flags that it has been initialized so the
 * filter parsing only happens on the first invocation.
 */
static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
	struct xfrm_dump_info info;

	/* The walk state must fit in the callback scratch space after
	 * the args[0] "initialized" flag.
	 */
	BUILD_BUG_ON(sizeof(struct xfrm_state_walk) >
		     sizeof(cb->args) - sizeof(cb->args[0]));

	info.in_skb = cb->skb;
	info.out_skb = skb;
	info.nlmsg_seq = cb->nlh->nlmsg_seq;
	info.nlmsg_flags = NLM_F_MULTI;

	if (!cb->args[0]) {
		struct nlattr *attrs[XFRMA_MAX+1];
		struct xfrm_address_filter *filter = NULL;
		u8 proto = 0;
		int err;

		err = nlmsg_parse_deprecated(cb->nlh, 0, attrs, XFRMA_MAX,
					     xfrma_policy, cb->extack);
		if (err < 0)
			return err;

		if (attrs[XFRMA_ADDRESS_FILTER]) {
			/* Ownership of @filter passes to the walk below. */
			filter = kmemdup(nla_data(attrs[XFRMA_ADDRESS_FILTER]),
					 sizeof(*filter), GFP_KERNEL);
			if (filter == NULL)
				return -ENOMEM;

			/* see addr_match(), (prefix length >> 5) << 2
			 * will be used to compare xfrm_address_t
			 */
			if (filter->splen > (sizeof(xfrm_address_t) << 3) ||
			    filter->dplen > (sizeof(xfrm_address_t) << 3)) {
				kfree(filter);
				return -EINVAL;
			}
		}

		if (attrs[XFRMA_PROTO])
			proto = nla_get_u8(attrs[XFRMA_PROTO]);

		xfrm_state_walk_init(walk, proto, filter);
		cb->args[0] = 1;
	}

	(void) xfrm_state_walk(net, walk, dump_one_state, &info);

	return skb->len;
}
1542
/* Build a single-SA reply message for @x addressed to the sender of
 * @in_skb.  Returns the new skb or an ERR_PTR on failure.
 */
static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
					  struct xfrm_state *x, u32 seq)
{
	struct sk_buff *resp;
	struct xfrm_dump_info info = {
		.in_skb = in_skb,
		.nlmsg_seq = seq,
		.nlmsg_flags = 0,
	};
	int err;

	resp = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!resp)
		return ERR_PTR(-ENOMEM);

	info.out_skb = resp;

	err = dump_one_state(x, 0, &info);
	if (err) {
		kfree_skb(resp);
		return ERR_PTR(err);
	}

	return resp;
}
1567
/* A wrapper for nlmsg_multicast() checking that nlsk is still available.
 * Must be called with RCU read lock.
 *
 * Consumes @skb on every path (freed on error, handed to
 * nlmsg_multicast() otherwise).  If a compat translator is loaded, the
 * compat form of the message is built before broadcasting.
 */
static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb,
				       u32 pid, unsigned int group)
{
	struct sock *nlsk = rcu_dereference(net->xfrm.nlsk);
	struct xfrm_translator *xtr;

	/* The netns may be tearing down; nlsk can be NULL under RCU. */
	if (!nlsk) {
		kfree_skb(skb);
		return -EPIPE;
	}

	xtr = xfrm_get_translator();
	if (xtr) {
		int err = xtr->alloc_compat(skb, nlmsg_hdr(skb));

		xfrm_put_translator(xtr);
		if (err) {
			kfree_skb(skb);
			return err;
		}
	}

	return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
}
1595
/* Worst-case reply size for XFRM_MSG_GETSPDINFO: the u32 flags word,
 * SPD counters, hash info, and one hash-threshold attribute each for
 * IPv4 and IPv6 — the xfrmu_spdhthresh term appears twice on purpose.
 */
static inline unsigned int xfrm_spdinfo_msgsize(void)
{
	return NLMSG_ALIGN(4)
	       + nla_total_size(sizeof(struct xfrmu_spdinfo))
	       + nla_total_size(sizeof(struct xfrmu_spdhinfo))
	       + nla_total_size(sizeof(struct xfrmu_spdhthresh))
	       + nla_total_size(sizeof(struct xfrmu_spdhthresh));
}
1604
/* Build the XFRM_MSG_NEWSPDINFO reply: echo @flags, then attach SPD
 * counters, hash info, and the IPv4/IPv6 hash thresholds.  The caller
 * sizes @skb with xfrm_spdinfo_msgsize(), so attribute puts are not
 * expected to fail.
 */
static int build_spdinfo(struct sk_buff *skb, struct net *net,
			 u32 portid, u32 seq, u32 flags)
{
	struct xfrmk_spdinfo si;
	struct xfrmu_spdinfo spc;
	struct xfrmu_spdhinfo sph;
	struct xfrmu_spdhthresh spt4, spt6;
	struct nlmsghdr *nlh;
	int err;
	u32 *f;
	unsigned lseq;

	nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
	if (nlh == NULL) /* shouldn't really happen ... */
		return -EMSGSIZE;

	f = nlmsg_data(nlh);
	*f = flags;
	xfrm_spd_getinfo(net, &si);
	spc.incnt = si.incnt;
	spc.outcnt = si.outcnt;
	spc.fwdcnt = si.fwdcnt;
	spc.inscnt = si.inscnt;
	spc.outscnt = si.outscnt;
	spc.fwdscnt = si.fwdscnt;
	sph.spdhcnt = si.spdhcnt;
	sph.spdhmcnt = si.spdhmcnt;

	/* seqlock read side: retry until we get a consistent snapshot of
	 * the hash thresholds.
	 */
	do {
		lseq = read_seqbegin(&net->xfrm.policy_hthresh.lock);

		spt4.lbits = net->xfrm.policy_hthresh.lbits4;
		spt4.rbits = net->xfrm.policy_hthresh.rbits4;
		spt6.lbits = net->xfrm.policy_hthresh.lbits6;
		spt6.rbits = net->xfrm.policy_hthresh.rbits6;
	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, lseq));

	err = nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
	if (!err)
		err = nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
	if (!err)
		err = nla_put(skb, XFRMA_SPD_IPV4_HTHRESH, sizeof(spt4), &spt4);
	if (!err)
		err = nla_put(skb, XFRMA_SPD_IPV6_HTHRESH, sizeof(spt6), &spt6);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}

	nlmsg_end(skb, nlh);
	return 0;
}
1657
/* XFRM_MSG_NEWSPDINFO handler: update the SPD hash thresholds for IPv4
 * and/or IPv6.  Both attributes are fully validated before any state is
 * modified; on change, a policy hash rebuild is kicked off.
 */
static int xfrm_set_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs,
			    struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrmu_spdhthresh *thresh4 = NULL;
	struct xfrmu_spdhthresh *thresh6 = NULL;

	/* selector prefixlen thresholds to hash policies */
	if (attrs[XFRMA_SPD_IPV4_HTHRESH]) {
		struct nlattr *rta = attrs[XFRMA_SPD_IPV4_HTHRESH];

		if (nla_len(rta) < sizeof(*thresh4)) {
			NL_SET_ERR_MSG(extack, "Invalid SPD_IPV4_HTHRESH attribute length");
			return -EINVAL;
		}
		thresh4 = nla_data(rta);
		if (thresh4->lbits > 32 || thresh4->rbits > 32) {
			NL_SET_ERR_MSG(extack, "Invalid hash threshold (must be <= 32 for IPv4)");
			return -EINVAL;
		}
	}
	if (attrs[XFRMA_SPD_IPV6_HTHRESH]) {
		struct nlattr *rta = attrs[XFRMA_SPD_IPV6_HTHRESH];

		if (nla_len(rta) < sizeof(*thresh6)) {
			NL_SET_ERR_MSG(extack, "Invalid SPD_IPV6_HTHRESH attribute length");
			return -EINVAL;
		}
		thresh6 = nla_data(rta);
		if (thresh6->lbits > 128 || thresh6->rbits > 128) {
			NL_SET_ERR_MSG(extack, "Invalid hash threshold (must be <= 128 for IPv6)");
			return -EINVAL;
		}
	}

	if (thresh4 || thresh6) {
		/* seqlock write side: readers in build_spdinfo() retry. */
		write_seqlock(&net->xfrm.policy_hthresh.lock);
		if (thresh4) {
			net->xfrm.policy_hthresh.lbits4 = thresh4->lbits;
			net->xfrm.policy_hthresh.rbits4 = thresh4->rbits;
		}
		if (thresh6) {
			net->xfrm.policy_hthresh.lbits6 = thresh6->lbits;
			net->xfrm.policy_hthresh.rbits6 = thresh6->rbits;
		}
		write_sequnlock(&net->xfrm.policy_hthresh.lock);

		xfrm_policy_hash_rebuild(net);
	}

	return 0;
}
1711
/* XFRM_MSG_GETSPDINFO handler: build an SPD info reply and unicast it
 * back to the requester.
 */
static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs,
			    struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct sk_buff *r_skb;
	u32 *flags = nlmsg_data(nlh);
	u32 sportid = NETLINK_CB(skb).portid;
	u32 seq = nlh->nlmsg_seq;
	int err;

	r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
	if (r_skb == NULL)
		return -ENOMEM;

	/* r_skb was sized by xfrm_spdinfo_msgsize(), so this can't fail. */
	err = build_spdinfo(r_skb, net, sportid, seq, *flags);
	BUG_ON(err < 0);

	return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
}
1732
xfrm_sadinfo_msgsize(void)1733 static inline unsigned int xfrm_sadinfo_msgsize(void)
1734 {
1735 return NLMSG_ALIGN(4)
1736 + nla_total_size(sizeof(struct xfrmu_sadhinfo))
1737 + nla_total_size(4); /* XFRMA_SAD_CNT */
1738 }
1739
/* Build the XFRM_MSG_NEWSADINFO reply: echo @flags, then attach the SA
 * count and hash info.  Returns 0 or a negative errno.
 */
static int build_sadinfo(struct sk_buff *skb, struct net *net,
			 u32 portid, u32 seq, u32 flags)
{
	struct xfrmu_sadhinfo sh;
	struct xfrmk_sadinfo si;
	struct nlmsghdr *nlh;
	u32 *fp;
	int err;

	nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
	if (!nlh) /* shouldn't really happen ... */
		return -EMSGSIZE;

	fp = nlmsg_data(nlh);
	*fp = flags;

	xfrm_sad_getinfo(net, &si);
	sh.sadhmcnt = si.sadhmcnt;
	sh.sadhcnt = si.sadhcnt;

	err = nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt);
	if (!err)
		err = nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}

	nlmsg_end(skb, nlh);
	return 0;
}
1771
xfrm_get_sadinfo(struct sk_buff * skb,struct nlmsghdr * nlh,struct nlattr ** attrs,struct netlink_ext_ack * extack)1772 static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
1773 struct nlattr **attrs,
1774 struct netlink_ext_ack *extack)
1775 {
1776 struct net *net = sock_net(skb->sk);
1777 struct sk_buff *r_skb;
1778 u32 *flags = nlmsg_data(nlh);
1779 u32 sportid = NETLINK_CB(skb).portid;
1780 u32 seq = nlh->nlmsg_seq;
1781 int err;
1782
1783 r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
1784 if (r_skb == NULL)
1785 return -ENOMEM;
1786
1787 err = build_sadinfo(r_skb, net, sportid, seq, *flags);
1788 BUG_ON(err < 0);
1789
1790 return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
1791 }
1792
xfrm_get_sa(struct sk_buff * skb,struct nlmsghdr * nlh,struct nlattr ** attrs,struct netlink_ext_ack * extack)1793 static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1794 struct nlattr **attrs, struct netlink_ext_ack *extack)
1795 {
1796 struct net *net = sock_net(skb->sk);
1797 struct xfrm_usersa_id *p = nlmsg_data(nlh);
1798 struct xfrm_state *x;
1799 struct sk_buff *resp_skb;
1800 int err = -ESRCH;
1801
1802 x = xfrm_user_state_lookup(net, p, attrs, &err);
1803 if (x == NULL)
1804 goto out_noput;
1805
1806 resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
1807 if (IS_ERR(resp_skb)) {
1808 err = PTR_ERR(resp_skb);
1809 } else {
1810 err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
1811 }
1812 xfrm_state_put(x);
1813 out_noput:
1814 return err;
1815 }
1816
/* XFRM_MSG_ALLOCSPI handler: find (or create via xfrm_find_acq) the
 * larval SA matching the request, allocate an SPI for it within
 * [p->min, p->max], and unicast the resulting SA back to the caller.
 */
static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct nlattr **attrs,
			      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct xfrm_userspi_info *p;
	struct xfrm_translator *xtr;
	struct sk_buff *resp_skb;
	xfrm_address_t *daddr;
	int family;
	int err;
	u32 mark;
	struct xfrm_mark m;
	u32 if_id = 0;
	u32 pcpu_num = UINT_MAX;

	p = nlmsg_data(nlh);
	err = verify_spi_info(p->info.id.proto, p->min, p->max, extack);
	if (err)
		goto out_noput;

	family = p->info.family;
	daddr = &p->info.id.daddr;

	x = NULL;

	mark = xfrm_mark_get(attrs, &m);

	if (attrs[XFRMA_IF_ID])
		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	if (attrs[XFRMA_SA_PCPU]) {
		pcpu_num = nla_get_u32(attrs[XFRMA_SA_PCPU]);
		if (pcpu_num >= num_possible_cpus()) {
			err = -EINVAL;
			goto out_noput;
		}
	}

	/* Prefer the ACQUIRE matching the caller-supplied sequence number,
	 * but only if its destination address also matches the request.
	 */
	if (p->info.seq) {
		x = xfrm_find_acq_byseq(net, mark, p->info.seq, pcpu_num);
		if (x && !xfrm_addr_equal(&x->id.daddr, daddr, family)) {
			xfrm_state_put(x);
			x = NULL;
		}
	}

	if (!x)
		x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
				  if_id, pcpu_num, p->info.id.proto, daddr,
				  &p->info.saddr, 1,
				  family);
	err = -ENOENT;
	if (!x) {
		NL_SET_ERR_MSG(extack, "Target ACQUIRE not found");
		goto out_noput;
	}

	err = xfrm_alloc_spi(x, p->min, p->max, extack);
	if (err)
		goto out;

	if (attrs[XFRMA_SA_DIR])
		x->dir = nla_get_u8(attrs[XFRMA_SA_DIR]);

	resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
	if (IS_ERR(resp_skb)) {
		err = PTR_ERR(resp_skb);
		goto out;
	}

	xtr = xfrm_get_translator();
	if (xtr) {
		/* NOTE(review): the compat form is built on the request skb
		 * here, not on resp_skb as in dump_one_state() — confirm
		 * this is intentional.
		 */
		err = xtr->alloc_compat(skb, nlmsg_hdr(skb));

		xfrm_put_translator(xtr);
		if (err) {
			kfree_skb(resp_skb);
			goto out;
		}
	}

	err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);

out:
	xfrm_state_put(x);
out_noput:
	return err;
}
1907
/* Validate a userspace-supplied policy direction.  Returns 0 for
 * IN/OUT/FWD, -EINVAL (with extack message) otherwise.
 */
static int verify_policy_dir(u8 dir, struct netlink_ext_ack *extack)
{
	if (dir == XFRM_POLICY_IN || dir == XFRM_POLICY_OUT ||
	    dir == XFRM_POLICY_FWD)
		return 0;

	NL_SET_ERR_MSG(extack, "Invalid policy direction");
	return -EINVAL;
}
1923
/* Validate a userspace-supplied policy type.  SUB policies are only
 * accepted when CONFIG_XFRM_SUB_POLICY is enabled.
 */
static int verify_policy_type(u8 type, struct netlink_ext_ack *extack)
{
	switch (type) {
	case XFRM_POLICY_TYPE_MAIN:
#ifdef CONFIG_XFRM_SUB_POLICY
	case XFRM_POLICY_TYPE_SUB:
#endif
		break;

	default:
		NL_SET_ERR_MSG(extack, "Invalid policy type");
		return -EINVAL;
	}

	return 0;
}
1940
/* Validate the fixed part of a new-policy request: share mode, action,
 * selector family/prefix lengths, direction, and the index/direction
 * encoding invariant.  Returns 0 or a negative errno with an extack
 * message describing the failure.
 */
static int verify_newpolicy_info(struct xfrm_userpolicy_info *p,
				 struct netlink_ext_ack *extack)
{
	int ret;

	switch (p->share) {
	case XFRM_SHARE_ANY:
	case XFRM_SHARE_SESSION:
	case XFRM_SHARE_USER:
	case XFRM_SHARE_UNIQUE:
		break;

	default:
		NL_SET_ERR_MSG(extack, "Invalid policy share");
		return -EINVAL;
	}

	switch (p->action) {
	case XFRM_POLICY_ALLOW:
	case XFRM_POLICY_BLOCK:
		break;

	default:
		NL_SET_ERR_MSG(extack, "Invalid policy action");
		return -EINVAL;
	}

	switch (p->sel.family) {
	case AF_INET:
		if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32) {
			NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 32 for IPv4)");
			return -EINVAL;
		}

		break;

	case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
		if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128) {
			NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 128 for IPv6)");
			return -EINVAL;
		}

		break;
#else
		/* Without IPv6 support the AF_INET6 case falls through here. */
		NL_SET_ERR_MSG(extack, "IPv6 support disabled");
		return -EAFNOSUPPORT;
#endif

	default:
		NL_SET_ERR_MSG(extack, "Invalid selector family");
		return -EINVAL;
	}

	ret = verify_policy_dir(p->dir, extack);
	if (ret)
		return ret;
	/* A non-zero index encodes its direction; it must agree with p->dir. */
	if (p->index && (xfrm_policy_id2dir(p->index) != p->dir)) {
		NL_SET_ERR_MSG(extack, "Policy index doesn't match direction");
		return -EINVAL;
	}

	return 0;
}
2005
copy_from_user_sec_ctx(struct xfrm_policy * pol,struct nlattr ** attrs)2006 static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs)
2007 {
2008 struct nlattr *rt = attrs[XFRMA_SEC_CTX];
2009 struct xfrm_user_sec_ctx *uctx;
2010
2011 if (!rt)
2012 return 0;
2013
2014 uctx = nla_data(rt);
2015 return security_xfrm_policy_alloc(&pol->security, uctx, GFP_KERNEL);
2016 }
2017
copy_templates(struct xfrm_policy * xp,struct xfrm_user_tmpl * ut,int nr)2018 static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
2019 int nr)
2020 {
2021 int i;
2022
2023 xp->xfrm_nr = nr;
2024 for (i = 0; i < nr; i++, ut++) {
2025 struct xfrm_tmpl *t = &xp->xfrm_vec[i];
2026
2027 memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
2028 memcpy(&t->saddr, &ut->saddr,
2029 sizeof(xfrm_address_t));
2030 t->reqid = ut->reqid;
2031 t->mode = ut->mode;
2032 t->share = ut->share;
2033 t->optional = ut->optional;
2034 t->aalgos = ut->aalgos;
2035 t->ealgos = ut->ealgos;
2036 t->calgos = ut->calgos;
2037 /* If all masks are ~0, then we allow all algorithms. */
2038 t->allalgs = !~(t->aalgos & t->ealgos & t->calgos);
2039 t->encap_family = ut->family;
2040 }
2041 }
2042
/* Validate @nr userspace templates against the policy family @family
 * and direction @dir.  May rewrite ut[i].family in place (zero means
 * "inherit the policy family").  Returns 0 or -EINVAL with an extack
 * message.
 */
static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family,
			 int dir, struct netlink_ext_ack *extack)
{
	u16 prev_family;
	int i;

	if (nr > XFRM_MAX_DEPTH) {
		NL_SET_ERR_MSG(extack, "Template count must be <= XFRM_MAX_DEPTH (" __stringify(XFRM_MAX_DEPTH) ")");
		return -EINVAL;
	}

	prev_family = family;

	for (i = 0; i < nr; i++) {
		/* We never validated the ut->family value, so many
		 * applications simply leave it at zero. The check was
		 * never made and ut->family was ignored because all
		 * templates could be assumed to have the same family as
		 * the policy itself. Now that we will have ipv4-in-ipv6
		 * and ipv6-in-ipv4 tunnels, this is no longer true.
		 */
		if (!ut[i].family)
			ut[i].family = family;

		switch (ut[i].mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			/* Tunnel-ish modes may change family, but not in
			 * optional outbound templates.
			 */
			if (ut[i].optional && dir == XFRM_POLICY_OUT) {
				NL_SET_ERR_MSG(extack, "Mode in optional template not allowed in outbound policy");
				return -EINVAL;
			}
			break;
		case XFRM_MODE_IPTFS:
			break;
		default:
			/* Transport-like modes cannot switch address family
			 * mid-chain.
			 */
			if (ut[i].family != prev_family) {
				NL_SET_ERR_MSG(extack, "Mode in template doesn't support a family change");
				return -EINVAL;
			}
			break;
		}
		if (ut[i].mode >= XFRM_MODE_MAX) {
			NL_SET_ERR_MSG(extack, "Mode in template must be < XFRM_MODE_MAX (" __stringify(XFRM_MODE_MAX) ")");
			return -EINVAL;
		}

		prev_family = ut[i].family;

		switch (ut[i].family) {
		case AF_INET:
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case AF_INET6:
			break;
#endif
		default:
			NL_SET_ERR_MSG(extack, "Invalid family in template");
			return -EINVAL;
		}

		if (!xfrm_id_proto_valid(ut[i].id.proto)) {
			NL_SET_ERR_MSG(extack, "Invalid XFRM protocol in template");
			return -EINVAL;
		}
	}

	return 0;
}
2111
copy_from_user_tmpl(struct xfrm_policy * pol,struct nlattr ** attrs,int dir,struct netlink_ext_ack * extack)2112 static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs,
2113 int dir, struct netlink_ext_ack *extack)
2114 {
2115 struct nlattr *rt = attrs[XFRMA_TMPL];
2116
2117 if (!rt) {
2118 pol->xfrm_nr = 0;
2119 } else {
2120 struct xfrm_user_tmpl *utmpl = nla_data(rt);
2121 int nr = nla_len(rt) / sizeof(*utmpl);
2122 int err;
2123
2124 err = validate_tmpl(nr, utmpl, pol->family, dir, extack);
2125 if (err)
2126 return err;
2127
2128 copy_templates(pol, utmpl, nr);
2129 }
2130 return 0;
2131 }
2132
/* Determine the policy type from the optional XFRMA_POLICY_TYPE
 * attribute, defaulting to MAIN, and store it in *@tp after validation.
 */
static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs,
				      struct netlink_ext_ack *extack)
{
	struct nlattr *nla = attrs[XFRMA_POLICY_TYPE];
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;

	if (nla) {
		struct xfrm_userpolicy_type *upt = nla_data(nla);

		type = upt->type;
	}

	err = verify_policy_type(type, extack);
	if (err)
		return err;

	*tp = type;
	return 0;
}
2153
/* Copy the fixed fields of a userspace policy request into @xp.  The
 * policy family is taken from the selector.
 */
static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
{
	xp->priority = p->priority;
	xp->index = p->index;
	memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
	memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
	xp->action = p->action;
	xp->flags = p->flags;
	xp->family = p->sel.family;
	/* XXX xp->share = p->share; */
}
2165
/* Fill the userspace-visible xfrm_userpolicy_info @p from @xp for
 * direction @dir.  share is reported as XFRM_SHARE_ANY because the
 * kernel does not track it (see the XXX in copy_from_user_policy()).
 */
static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
{
	memset(p, 0, sizeof(*p));
	memcpy(&p->sel, &xp->selector, sizeof(p->sel));
	memcpy(&p->lft, &xp->lft, sizeof(p->lft));
	memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
	p->priority = xp->priority;
	p->index = xp->index;
	p->sel.family = xp->family;
	p->dir = dir;
	p->action = xp->action;
	p->flags = xp->flags;
	p->share = XFRM_SHARE_ANY; /* XXX xp->share */
}
2180
/* Allocate and populate a new xfrm_policy from a userspace request:
 * fixed fields, type, templates, security context, mark, if_id, and
 * optional hardware offload.  On success returns the policy; on failure
 * returns NULL with *@errp set, after destroying the half-built policy.
 */
static struct xfrm_policy *xfrm_policy_construct(struct net *net,
						 struct xfrm_userpolicy_info *p,
						 struct nlattr **attrs,
						 int *errp,
						 struct netlink_ext_ack *extack)
{
	struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL);
	int err;

	if (!xp) {
		*errp = -ENOMEM;
		return NULL;
	}

	copy_from_user_policy(xp, p);

	err = copy_from_user_policy_type(&xp->type, attrs, extack);
	if (err)
		goto error;

	if (!(err = copy_from_user_tmpl(xp, attrs, p->dir, extack)))
		err = copy_from_user_sec_ctx(xp, attrs);
	if (err)
		goto error;

	xfrm_mark_get(attrs, &xp->mark);

	if (attrs[XFRMA_IF_ID])
		xp->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	/* configure the hardware if offload is requested */
	if (attrs[XFRMA_OFFLOAD_DEV]) {
		err = xfrm_dev_policy_add(net, xp,
					  nla_data(attrs[XFRMA_OFFLOAD_DEV]),
					  p->dir, extack);
		if (err)
			goto error;
	}

	return xp;
 error:
	*errp = err;
	/* Mark dead before destroy so timers/walkers skip it. */
	xp->walk.dead = 1;
	xfrm_policy_destroy(xp);
	return NULL;
}
2227
/* XFRM_MSG_NEWPOLICY/UPDPOLICY handler: validate, construct and insert
 * a policy, audit the attempt, and notify key managers on success.
 * NEWPOLICY is exclusive (fails on duplicates); UPDPOLICY replaces.
 */
static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
	struct xfrm_policy *xp;
	struct km_event c;
	int err;
	int excl;

	err = verify_newpolicy_info(p, extack);
	if (err)
		return err;
	err = verify_sec_ctx_len(attrs, extack);
	if (err)
		return err;

	xp = xfrm_policy_construct(net, p, attrs, &err, extack);
	if (!xp)
		return err;

	/* shouldn't excl be based on nlh flags??
	 * Aha! this is anti-netlink really i.e more pfkey derived
	 * in netlink excl is a flag and you wouldn't need
	 * a type XFRM_MSG_UPDPOLICY - JHS */
	excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
	err = xfrm_policy_insert(p->dir, xp, excl);
	xfrm_audit_policy_add(xp, err ? 0 : 1, true);

	if (err) {
		/* Insert failed: unwind offload, LSM context, and the
		 * policy itself.
		 */
		xfrm_dev_policy_delete(xp);
		xfrm_dev_policy_free(xp);
		security_xfrm_policy_free(xp->security);
		kfree(xp);
		return err;
	}

	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	km_policy_notify(xp, p->dir, &c);

	xfrm_pol_put(xp);

	return 0;
}
2275
copy_to_user_tmpl(struct xfrm_policy * xp,struct sk_buff * skb)2276 static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
2277 {
2278 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
2279 int i;
2280
2281 if (xp->xfrm_nr == 0)
2282 return 0;
2283
2284 if (xp->xfrm_nr > XFRM_MAX_DEPTH)
2285 return -ENOBUFS;
2286
2287 for (i = 0; i < xp->xfrm_nr; i++) {
2288 struct xfrm_user_tmpl *up = &vec[i];
2289 struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
2290
2291 memset(up, 0, sizeof(*up));
2292 memcpy(&up->id, &kp->id, sizeof(up->id));
2293 up->family = kp->encap_family;
2294 memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
2295 up->reqid = kp->reqid;
2296 up->mode = kp->mode;
2297 up->share = kp->share;
2298 up->optional = kp->optional;
2299 up->aalgos = kp->aalgos;
2300 up->ealgos = kp->ealgos;
2301 up->calgos = kp->calgos;
2302 }
2303
2304 return nla_put(skb, XFRMA_TMPL,
2305 sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec);
2306 }
2307
copy_to_user_state_sec_ctx(struct xfrm_state * x,struct sk_buff * skb)2308 static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
2309 {
2310 if (x->security) {
2311 return copy_sec_ctx(x->security, skb);
2312 }
2313 return 0;
2314 }
2315
copy_to_user_sec_ctx(struct xfrm_policy * xp,struct sk_buff * skb)2316 static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
2317 {
2318 if (xp->security)
2319 return copy_sec_ctx(xp->security, skb);
2320 return 0;
2321 }
/* Attribute space to reserve for XFRMA_POLICY_TYPE; zero when
 * sub-policy support is compiled out and the attribute is never sent.
 */
static inline unsigned int userpolicy_type_attrsize(void)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	return nla_total_size(sizeof(struct xfrm_userpolicy_type));
#else
	return 0;
#endif
}
2330
#ifdef CONFIG_XFRM_SUB_POLICY
/* Emit the policy type (main/sub) as an XFRMA_POLICY_TYPE attribute. */
static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
	struct xfrm_userpolicy_type upt;

	/* Sadly there are two holes in struct xfrm_userpolicy_type */
	memset(&upt, 0, sizeof(upt));
	upt.type = type;

	return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
}

#else
/* Without sub-policy support the attribute is omitted entirely. */
static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
	return 0;
}
#endif
2349
/* Emit one policy as an XFRM_MSG_NEWPOLICY message into sp->out_skb.
 * Serves both as the policy-walk callback for dumps and as the builder
 * for single-policy replies (via xfrm_policy_netlink()).  On any
 * attribute failure the partially built message is cancelled and the
 * error returned.
 */
static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
{
	struct xfrm_dump_info *sp = ptr;
	struct xfrm_userpolicy_info *p;
	struct sk_buff *in_skb = sp->in_skb;
	struct sk_buff *skb = sp->out_skb;
	struct xfrm_translator *xtr;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
			XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	p = nlmsg_data(nlh);
	copy_to_user_policy(xp, p, dir);
	err = copy_to_user_tmpl(xp, skb);
	if (!err)
		err = copy_to_user_sec_ctx(xp, skb);
	if (!err)
		err = copy_to_user_policy_type(xp->type, skb);
	if (!err)
		err = xfrm_mark_put(skb, &xp->mark);
	if (!err)
		err = xfrm_if_id_put(skb, xp->if_id);
	/* only offloaded policies carry an XFRMA_OFFLOAD_DEV attribute */
	if (!err && xp->xdo.dev)
		err = copy_user_offload(&xp->xdo, skb);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}
	nlmsg_end(skb, nlh);

	/* let a registered compat layer append a translated copy */
	xtr = xfrm_get_translator();
	if (xtr) {
		err = xtr->alloc_compat(skb, nlh);

		xfrm_put_translator(xtr);
		if (err) {
			nlmsg_cancel(skb, nlh);
			return err;
		}
	}

	return 0;
}
2397
xfrm_dump_policy_done(struct netlink_callback * cb)2398 static int xfrm_dump_policy_done(struct netlink_callback *cb)
2399 {
2400 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
2401 struct net *net = sock_net(cb->skb->sk);
2402
2403 xfrm_policy_walk_done(walk, net);
2404 return 0;
2405 }
2406
/* Initialize the policy-walk state before a policy dump begins. */
static int xfrm_dump_policy_start(struct netlink_callback *cb)
{
	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;

	/* the walk state lives directly in cb->args, so it must fit there */
	BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args));

	xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
	return 0;
}
2416
xfrm_dump_policy(struct sk_buff * skb,struct netlink_callback * cb)2417 static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
2418 {
2419 struct net *net = sock_net(skb->sk);
2420 struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
2421 struct xfrm_dump_info info;
2422
2423 info.in_skb = cb->skb;
2424 info.out_skb = skb;
2425 info.nlmsg_seq = cb->nlh->nlmsg_seq;
2426 info.nlmsg_flags = NLM_F_MULTI;
2427
2428 (void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
2429
2430 return skb->len;
2431 }
2432
/* Build a freshly allocated skb holding a single XFRM_MSG_NEWPOLICY
 * message describing @xp, for unicasting back to a GETPOLICY caller.
 * Returns the skb, or an ERR_PTR() on allocation/build failure.
 */
static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
					   struct xfrm_policy *xp,
					   int dir, u32 seq)
{
	struct xfrm_dump_info info;
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	info.in_skb = in_skb;
	info.out_skb = skb;
	info.nlmsg_seq = seq;
	info.nlmsg_flags = 0;

	err = dump_one_policy(xp, dir, 0, &info);
	if (err) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2458
/* Broadcast the current per-netns default policies (in/fwd/out) to the
 * XFRMNLGRP_POLICY multicast group as an XFRM_MSG_GETDEFAULT message.
 */
static int xfrm_notify_userpolicy(struct net *net)
{
	struct xfrm_userpolicy_default *up;
	int len = NLMSG_ALIGN(sizeof(*up));
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_GETDEFAULT, sizeof(*up), 0);
	if (nlh == NULL) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	up = nlmsg_data(nlh);
	up->in = net->xfrm.policy_default[XFRM_POLICY_IN];
	up->fwd = net->xfrm.policy_default[XFRM_POLICY_FWD];
	up->out = net->xfrm.policy_default[XFRM_POLICY_OUT];

	nlmsg_end(skb, nlh);

	rcu_read_lock();
	err = xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
	rcu_read_unlock();

	return err;
}
2490
xfrm_userpolicy_is_valid(__u8 policy)2491 static bool xfrm_userpolicy_is_valid(__u8 policy)
2492 {
2493 return policy == XFRM_USERPOLICY_BLOCK ||
2494 policy == XFRM_USERPOLICY_ACCEPT;
2495 }
2496
/* XFRM_MSG_SETDEFAULT handler: update whichever per-direction default
 * policies carry a valid value (BLOCK or ACCEPT), invalidate cached
 * routes, and notify listeners of the new defaults.
 */
static int xfrm_set_default(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_userpolicy_default *up = nlmsg_data(nlh);

	if (xfrm_userpolicy_is_valid(up->in))
		net->xfrm.policy_default[XFRM_POLICY_IN] = up->in;

	if (xfrm_userpolicy_is_valid(up->fwd))
		net->xfrm.policy_default[XFRM_POLICY_FWD] = up->fwd;

	if (xfrm_userpolicy_is_valid(up->out))
		net->xfrm.policy_default[XFRM_POLICY_OUT] = up->out;

	/* flush cached routes so the new defaults take effect */
	rt_genid_bump_all(net);

	xfrm_notify_userpolicy(net);
	return 0;
}
2517
/* XFRM_MSG_GETDEFAULT handler: unicast the current per-netns default
 * policies (in/fwd/out) back to the requesting socket.
 */
static int xfrm_get_default(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct sk_buff *r_skb;
	struct nlmsghdr *r_nlh;
	struct net *net = sock_net(skb->sk);
	struct xfrm_userpolicy_default *r_up;
	int len = NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_default));
	u32 portid = NETLINK_CB(skb).portid;
	u32 seq = nlh->nlmsg_seq;

	r_skb = nlmsg_new(len, GFP_ATOMIC);
	if (!r_skb)
		return -ENOMEM;

	r_nlh = nlmsg_put(r_skb, portid, seq, XFRM_MSG_GETDEFAULT, sizeof(*r_up), 0);
	if (!r_nlh) {
		kfree_skb(r_skb);
		return -EMSGSIZE;
	}

	r_up = nlmsg_data(r_nlh);
	r_up->in = net->xfrm.policy_default[XFRM_POLICY_IN];
	r_up->fwd = net->xfrm.policy_default[XFRM_POLICY_FWD];
	r_up->out = net->xfrm.policy_default[XFRM_POLICY_OUT];
	nlmsg_end(r_skb, r_nlh);

	return nlmsg_unicast(net->xfrm.nlsk, r_skb, portid);
}
2547
/* XFRM_MSG_GETPOLICY / XFRM_MSG_DELPOLICY handler: look up a policy by
 * index or by (selector, security context); either unicast it back to
 * the caller (GET) or delete it and notify listeners (DEL).
 */
static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_userpolicy_id *p;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;
	struct km_event c;
	int delete;
	struct xfrm_mark m;
	u32 if_id = 0;

	p = nlmsg_data(nlh);
	delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;

	err = copy_from_user_policy_type(&type, attrs, extack);
	if (err)
		return err;

	err = verify_policy_dir(p->dir, extack);
	if (err)
		return err;

	if (attrs[XFRMA_IF_ID])
		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	xfrm_mark_get(attrs, &m);

	if (p->index)
		/* lookup by index; unlinks the policy too when deleting */
		xp = xfrm_policy_byid(net, &m, if_id, type, p->dir,
				      p->index, delete, &err);
	else {
		struct nlattr *rt = attrs[XFRMA_SEC_CTX];
		struct xfrm_sec_ctx *ctx;

		err = verify_sec_ctx_len(attrs, extack);
		if (err)
			return err;

		ctx = NULL;
		if (rt) {
			struct xfrm_user_sec_ctx *uctx = nla_data(rt);

			err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
			if (err)
				return err;
		}
		xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
					   &p->sel, ctx, delete, &err);
		/* ctx was only needed for the lookup comparison */
		security_xfrm_policy_free(ctx);
	}
	if (xp == NULL)
		return -ENOENT;

	if (!delete) {
		struct sk_buff *resp_skb;

		resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
		if (IS_ERR(resp_skb)) {
			err = PTR_ERR(resp_skb);
		} else {
			err = nlmsg_unicast(net->xfrm.nlsk, resp_skb,
					    NETLINK_CB(skb).portid);
		}
	} else {
		/* err here reflects the deletion attempt made by the lookup */
		xfrm_audit_policy_delete(xp, err ? 0 : 1, true);

		if (err != 0)
			goto out;

		c.data.byid = p->index;
		c.event = nlh->nlmsg_type;
		c.seq = nlh->nlmsg_seq;
		c.portid = nlh->nlmsg_pid;
		km_policy_notify(xp, p->dir, &c);
	}

 out:
	xfrm_pol_put(xp);
	return err;
}
2631
/* XFRM_MSG_FLUSHSA handler: delete all SAs of the requested proto in
 * this netns and broadcast the flush event.  An already-empty table
 * (-ESRCH) is treated as success.
 */
static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct nlattr **attrs,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct km_event c;
	struct xfrm_usersa_flush *p = nlmsg_data(nlh);
	int err;

	err = xfrm_state_flush(net, p->proto, true);
	if (err) {
		if (err == -ESRCH) /* empty table */
			return 0;
		return err;
	}
	c.data.proto = p->proto;
	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.net = net;
	/* NULL state means "flush" to the km notifier chain */
	km_state_notify(NULL, &c);

	return 0;
}
2656
/* Worst-case payload size of an XFRM_MSG_NEWAE message for @x: the
 * fixed xfrm_aevent_id header plus replay state (ESN-sized when the
 * state uses ESN), lifetime counters, mark, and the optional
 * threshold/dir/pcpu attributes.
 */
static inline unsigned int xfrm_aevent_msgsize(struct xfrm_state *x)
{
	unsigned int replay_size = x->replay_esn ?
			      xfrm_replay_state_esn_len(x->replay_esn) :
			      sizeof(struct xfrm_replay_state);

	return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
	       + nla_total_size(replay_size)
	       + nla_total_size_64bit(sizeof(struct xfrm_lifetime_cur))
	       + nla_total_size(sizeof(struct xfrm_mark))
	       + nla_total_size(4) /* XFRM_AE_RTHR */
	       + nla_total_size(4) /* XFRM_AE_ETHR */
	       + nla_total_size(sizeof(x->dir)) /* XFRMA_SA_DIR */
	       + nla_total_size(4); /* XFRMA_SA_PCPU */
}
2672
/* Fill @skb with an XFRM_MSG_NEWAE message describing the async-event
 * state of @x: replay counters (ESN or legacy), lifetime usage, and -
 * when requested via c->data.aevent - the replay/expiry thresholds.
 * On any attribute failure the partial message is cancelled and the
 * error returned.
 */
static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
{
	struct xfrm_aevent_id *id;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	id = nlmsg_data(nlh);
	memset(&id->sa_id, 0, sizeof(id->sa_id));
	memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr));
	id->sa_id.spi = x->id.spi;
	id->sa_id.family = x->props.family;
	id->sa_id.proto = x->id.proto;
	memcpy(&id->saddr, &x->props.saddr, sizeof(x->props.saddr));
	id->reqid = x->props.reqid;
	id->flags = c->data.aevent;

	/* ESN states export the full ESN replay structure */
	if (x->replay_esn) {
		err = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
			      xfrm_replay_state_esn_len(x->replay_esn),
			      x->replay_esn);
	} else {
		err = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
			      &x->replay);
	}
	if (err)
		goto out_cancel;
	err = nla_put_64bit(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft,
			    XFRMA_PAD);
	if (err)
		goto out_cancel;

	if (id->flags & XFRM_AE_RTHR) {
		err = nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
		if (err)
			goto out_cancel;
	}
	if (id->flags & XFRM_AE_ETHR) {
		/* scale jiffies to 1/10th-of-a-second units */
		err = nla_put_u32(skb, XFRMA_ETIMER_THRESH,
				  x->replay_maxage * 10 / HZ);
		if (err)
			goto out_cancel;
	}
	err = xfrm_mark_put(skb, &x->mark);
	if (err)
		goto out_cancel;

	err = xfrm_if_id_put(skb, x->if_id);
	if (err)
		goto out_cancel;
	/* UINT_MAX means "no per-CPU SA" - attribute omitted */
	if (x->pcpu_num != UINT_MAX) {
		err = nla_put_u32(skb, XFRMA_SA_PCPU, x->pcpu_num);
		if (err)
			goto out_cancel;
	}

	if (x->dir) {
		err = nla_put_u8(skb, XFRMA_SA_DIR, x->dir);
		if (err)
			goto out_cancel;
	}

	nlmsg_end(skb, nlh);
	return 0;

out_cancel:
	nlmsg_cancel(skb, nlh);
	return err;
}
2745
/* XFRM_MSG_GETAE handler: look up the SA named by the aevent id and
 * unicast its replay/lifetime state back to the requesting socket.
 */
static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct sk_buff *r_skb;
	int err;
	struct km_event c;
	u32 mark;
	struct xfrm_mark m;
	struct xfrm_aevent_id *p = nlmsg_data(nlh);
	struct xfrm_usersa_id *id = &p->sa_id;

	mark = xfrm_mark_get(attrs, &m);

	x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family);
	if (x == NULL)
		return -ESRCH;

	r_skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
	if (r_skb == NULL) {
		xfrm_state_put(x);
		return -ENOMEM;
	}

	/*
	 * XXX: is this lock really needed - none of the other
	 * gets lock (the concern is things getting updated
	 * while we are still reading) - jhs
	 */
	spin_lock_bh(&x->lock);
	c.data.aevent = p->flags;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;

	/* skb was sized by xfrm_aevent_msgsize(), so this cannot fail */
	err = build_aevent(r_skb, x, &c);
	BUG_ON(err < 0);

	err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).portid);
	spin_unlock_bh(&x->lock);
	xfrm_state_put(x);
	return err;
}
2789
/* XFRM_MSG_NEWAE handler: update an SA's replay state, lifetime
 * counters and/or thresholds from the supplied attributes, then
 * broadcast an XFRM_AE_CU event.  Requires NLM_F_REPLACE and at least
 * one updatable attribute; the SA must be in the VALID state.
 */
static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct km_event c;
	int err = -EINVAL;
	u32 mark = 0;
	struct xfrm_mark m;
	struct xfrm_aevent_id *p = nlmsg_data(nlh);
	struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
	struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
	struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
	struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
	struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];

	if (!lt && !rp && !re && !et && !rt) {
		NL_SET_ERR_MSG(extack, "Missing required attribute for AE");
		return err;
	}

	/* pedantic mode - thou shalt sayeth replaceth */
	if (!(nlh->nlmsg_flags & NLM_F_REPLACE)) {
		NL_SET_ERR_MSG(extack, "NLM_F_REPLACE flag is required");
		return err;
	}

	mark = xfrm_mark_get(attrs, &m);

	x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
	if (x == NULL)
		return -ESRCH;

	if (x->km.state != XFRM_STATE_VALID) {
		NL_SET_ERR_MSG(extack, "SA must be in VALID state");
		goto out;
	}

	/* the supplied ESN attribute must match the SA's window size */
	err = xfrm_replay_verify_len(x->replay_esn, re, extack);
	if (err)
		goto out;

	spin_lock_bh(&x->lock);
	xfrm_update_ae_params(x, attrs, 1);
	spin_unlock_bh(&x->lock);

	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.data.aevent = XFRM_AE_CU;
	km_state_notify(x, &c);
	err = 0;
out:
	xfrm_state_put(x);
	return err;
}
2846
/* XFRM_MSG_FLUSHPOLICY handler: remove all policies of the requested
 * type and notify listeners.  An already-empty table (-ESRCH) is
 * treated as success.
 */
static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct nlattr **attrs,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct km_event c;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;

	err = copy_from_user_policy_type(&type, attrs, extack);
	if (err)
		return err;

	err = xfrm_policy_flush(net, type, true);
	if (err) {
		if (err == -ESRCH) /* empty table */
			return 0;
		return err;
	}

	c.data.type = type;
	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.net = net;
	/* NULL policy means "flush" to the km notifier chain */
	km_policy_notify(NULL, 0, &c);
	return 0;
}
2875
/* XFRM_MSG_POLEXPIRE handler: find the referenced policy (by index or
 * by selector + security context) and announce its expiry; a hard
 * expiry also deletes the policy.
 */
static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct nlattr **attrs,
			       struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_user_polexpire *up = nlmsg_data(nlh);
	struct xfrm_userpolicy_info *p = &up->pol;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err = -ENOENT;
	struct xfrm_mark m;
	u32 if_id = 0;

	err = copy_from_user_policy_type(&type, attrs, extack);
	if (err)
		return err;

	err = verify_policy_dir(p->dir, extack);
	if (err)
		return err;

	if (attrs[XFRMA_IF_ID])
		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	xfrm_mark_get(attrs, &m);

	if (p->index)
		/* lookup only - delete flag is 0 here */
		xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, p->index,
				      0, &err);
	else {
		struct nlattr *rt = attrs[XFRMA_SEC_CTX];
		struct xfrm_sec_ctx *ctx;

		err = verify_sec_ctx_len(attrs, extack);
		if (err)
			return err;

		ctx = NULL;
		if (rt) {
			struct xfrm_user_sec_ctx *uctx = nla_data(rt);

			err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
			if (err)
				return err;
		}
		xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
					   &p->sel, ctx, 0, &err);
		security_xfrm_policy_free(ctx);
	}
	if (xp == NULL)
		return -ENOENT;

	/* policy already being torn down - nothing to expire */
	if (unlikely(xp->walk.dead))
		goto out;

	err = 0;
	if (up->hard) {
		xfrm_policy_delete(xp, p->dir);
		xfrm_audit_policy_delete(xp, 1, true);
	}
	km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);

out:
	xfrm_pol_put(xp);
	return err;
}
2942
/* XFRM_MSG_EXPIRE handler: announce an SA's expiry; a hard expiry also
 * deletes the SA.  The SA must currently be in the VALID state.
 */
static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct nlattr **attrs,
			      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	int err;
	struct xfrm_user_expire *ue = nlmsg_data(nlh);
	struct xfrm_usersa_info *p = &ue->state;
	struct xfrm_mark m;
	u32 mark = xfrm_mark_get(attrs, &m);

	x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);

	err = -ENOENT;
	if (x == NULL)
		return err;

	spin_lock_bh(&x->lock);
	err = -EINVAL;
	if (x->km.state != XFRM_STATE_VALID) {
		NL_SET_ERR_MSG(extack, "SA must be in VALID state");
		goto out;
	}

	km_state_expired(x, ue->hard, nlh->nlmsg_pid);

	if (ue->hard) {
		/* __xfrm_state_delete: lock is already held */
		__xfrm_state_delete(x);
		xfrm_audit_state_delete(x, 1, true);
	}
	err = 0;
out:
	spin_unlock_bh(&x->lock);
	xfrm_state_put(x);
	return err;
}
2980
/* XFRM_MSG_ACQUIRE handler: a userspace key manager injects an acquire.
 * Build a temporary state and policy from the message, call km_query()
 * for each policy template, then discard both objects.
 */
static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs,
			    struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_user_tmpl *ut;
	int i;
	struct nlattr *rt = attrs[XFRMA_TMPL];
	struct xfrm_mark mark;

	struct xfrm_user_acquire *ua = nlmsg_data(nlh);
	struct xfrm_state *x = xfrm_state_alloc(net);
	int err = -ENOMEM;

	if (!x)
		goto nomem;

	xfrm_mark_get(attrs, &mark);

	if (attrs[XFRMA_SA_PCPU]) {
		x->pcpu_num = nla_get_u32(attrs[XFRMA_SA_PCPU]);
		err = -EINVAL;
		if (x->pcpu_num >= num_possible_cpus())
			goto free_state;
	}

	err = verify_newpolicy_info(&ua->policy, extack);
	if (err)
		goto free_state;
	err = verify_sec_ctx_len(attrs, extack);
	if (err)
		goto free_state;

	/* build an XP */
	xp = xfrm_policy_construct(net, &ua->policy, attrs, &err, extack);
	if (!xp)
		goto free_state;

	memcpy(&x->id, &ua->id, sizeof(ua->id));
	memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
	memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
	xp->mark.m = x->mark.m = mark.m;
	xp->mark.v = x->mark.v = mark.v;
	ut = nla_data(rt);
	/* extract the templates and for each call km_key */
	for (i = 0; i < xp->xfrm_nr; i++, ut++) {
		struct xfrm_tmpl *t = &xp->xfrm_vec[i];
		memcpy(&x->id, &t->id, sizeof(x->id));
		x->props.mode = t->mode;
		x->props.reqid = t->reqid;
		x->props.family = ut->family;
		t->aalgos = ua->aalgos;
		t->ealgos = ua->ealgos;
		t->calgos = ua->calgos;
		/* NOTE(review): km_query() errors are not propagated -
		 * the handler returns 0 below regardless; confirm this
		 * best-effort behavior is intended before changing it. */
		err = km_query(x, t, xp);

	}

	/* both objects were only vehicles for the queries - discard them */
	xfrm_state_free(x);
	xfrm_dev_policy_delete(xp);
	xfrm_dev_policy_free(xp);
	security_xfrm_policy_free(xp->security);
	kfree(xp);

	return 0;

free_state:
	xfrm_state_free(x);
nomem:
	return err;
}
3053
3054 #ifdef CONFIG_XFRM_MIGRATE
/* Extract up to XFRM_MAX_DEPTH xfrm_migrate entries from the
 * XFRMA_MIGRATE attribute (and the optional kmaddress when @k is
 * non-NULL).  On success *num holds the number of entries copied.
 */
static int copy_from_user_migrate(struct xfrm_migrate *ma,
				  struct xfrm_kmaddress *k,
				  struct nlattr **attrs, int *num,
				  struct netlink_ext_ack *extack)
{
	struct nlattr *rt = attrs[XFRMA_MIGRATE];
	struct xfrm_user_migrate *um;
	int i, num_migrate;

	if (k != NULL) {
		struct xfrm_user_kmaddress *uk;

		uk = nla_data(attrs[XFRMA_KMADDRESS]);
		memcpy(&k->local, &uk->local, sizeof(k->local));
		memcpy(&k->remote, &uk->remote, sizeof(k->remote));
		k->family = uk->family;
		k->reserved = uk->reserved;
	}

	um = nla_data(rt);
	num_migrate = nla_len(rt) / sizeof(*um);

	if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH) {
		NL_SET_ERR_MSG(extack, "Invalid number of SAs to migrate, must be 0 < num <= XFRM_MAX_DEPTH (6)");
		return -EINVAL;
	}

	for (i = 0; i < num_migrate; i++, um++, ma++) {
		memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
		memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
		memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
		memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));

		ma->proto = um->proto;
		ma->mode = um->mode;
		ma->reqid = um->reqid;

		ma->old_family = um->old_family;
		ma->new_family = um->new_family;
	}

	*num = i;
	return 0;
}
3099
/* XFRM_MSG_MIGRATE handler: parse the migration list plus the optional
 * kmaddress, encap template, if_id and offload config, then hand
 * everything to xfrm_migrate().
 */
static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct xfrm_userpolicy_id *pi = nlmsg_data(nlh);
	struct xfrm_migrate m[XFRM_MAX_DEPTH];
	struct xfrm_kmaddress km, *kmp;
	u8 type;
	int err;
	int n = 0;
	struct net *net = sock_net(skb->sk);
	struct xfrm_encap_tmpl *encap = NULL;
	struct xfrm_user_offload *xuo = NULL;
	u32 if_id = 0;

	if (!attrs[XFRMA_MIGRATE]) {
		NL_SET_ERR_MSG(extack, "Missing required MIGRATE attribute");
		return -EINVAL;
	}

	/* kmaddress is optional; only filled when the attribute exists */
	kmp = attrs[XFRMA_KMADDRESS] ? &km : NULL;

	err = copy_from_user_policy_type(&type, attrs, extack);
	if (err)
		return err;

	err = copy_from_user_migrate(m, kmp, attrs, &n, extack);
	if (err)
		return err;

	if (!n)
		return 0;

	if (attrs[XFRMA_ENCAP]) {
		encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
				sizeof(*encap), GFP_KERNEL);
		if (!encap)
			return -ENOMEM;
	}

	if (attrs[XFRMA_IF_ID])
		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	if (attrs[XFRMA_OFFLOAD_DEV]) {
		xuo = kmemdup(nla_data(attrs[XFRMA_OFFLOAD_DEV]),
			      sizeof(*xuo), GFP_KERNEL);
		if (!xuo) {
			err = -ENOMEM;
			goto error;
		}
	}
	err = xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net, encap,
			   if_id, extack, xuo);
error:
	kfree(encap);
	kfree(xuo);
	return err;
}
#else
/* Migration support compiled out. */
static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	return -ENOPROTOOPT;
}
#endif
3164
3165 #ifdef CONFIG_XFRM_MIGRATE
copy_to_user_migrate(const struct xfrm_migrate * m,struct sk_buff * skb)3166 static int copy_to_user_migrate(const struct xfrm_migrate *m, struct sk_buff *skb)
3167 {
3168 struct xfrm_user_migrate um;
3169
3170 memset(&um, 0, sizeof(um));
3171 um.proto = m->proto;
3172 um.mode = m->mode;
3173 um.reqid = m->reqid;
3174 um.old_family = m->old_family;
3175 memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
3176 memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
3177 um.new_family = m->new_family;
3178 memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
3179 memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));
3180
3181 return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
3182 }
3183
copy_to_user_kmaddress(const struct xfrm_kmaddress * k,struct sk_buff * skb)3184 static int copy_to_user_kmaddress(const struct xfrm_kmaddress *k, struct sk_buff *skb)
3185 {
3186 struct xfrm_user_kmaddress uk;
3187
3188 memset(&uk, 0, sizeof(uk));
3189 uk.family = k->family;
3190 uk.reserved = k->reserved;
3191 memcpy(&uk.local, &k->local, sizeof(uk.local));
3192 memcpy(&uk.remote, &k->remote, sizeof(uk.remote));
3193
3194 return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk);
3195 }
3196
/* Payload size of an XFRM_MSG_MIGRATE notification carrying
 * @num_migrate entries plus the optional kmaddress and encap
 * attributes.
 */
static inline unsigned int xfrm_migrate_msgsize(int num_migrate, int with_kma,
						int with_encp)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id))
	      + (with_kma ? nla_total_size(sizeof(struct xfrm_kmaddress)) : 0)
	      + (with_encp ? nla_total_size(sizeof(struct xfrm_encap_tmpl)) : 0)
	      + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate)
	      + userpolicy_type_attrsize();
}
3206
/* Fill @skb with an XFRM_MSG_MIGRATE message: a policy id built from
 * the selector and direction, optional kmaddress and encap attributes,
 * the policy type, and one XFRMA_MIGRATE attribute per migrated SA.
 * Cancels the partial message and returns the error on failure.
 */
static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
			 int num_migrate, const struct xfrm_kmaddress *k,
			 const struct xfrm_selector *sel,
			 const struct xfrm_encap_tmpl *encap, u8 dir, u8 type)
{
	const struct xfrm_migrate *mp;
	struct xfrm_userpolicy_id *pol_id;
	struct nlmsghdr *nlh;
	int i, err;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	pol_id = nlmsg_data(nlh);
	/* copy data from selector, dir, and type to the pol_id */
	memset(pol_id, 0, sizeof(*pol_id));
	memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
	pol_id->dir = dir;

	if (k != NULL) {
		err = copy_to_user_kmaddress(k, skb);
		if (err)
			goto out_cancel;
	}
	if (encap) {
		err = nla_put(skb, XFRMA_ENCAP, sizeof(*encap), encap);
		if (err)
			goto out_cancel;
	}
	err = copy_to_user_policy_type(type, skb);
	if (err)
		goto out_cancel;
	for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
		err = copy_to_user_migrate(mp, skb);
		if (err)
			goto out_cancel;
	}

	nlmsg_end(skb, nlh);
	return 0;

out_cancel:
	nlmsg_cancel(skb, nlh);
	return err;
}
3253
/* Broadcast an XFRM_MSG_MIGRATE notification to XFRMNLGRP_MIGRATE
 * listeners.  The skb is sized exactly by xfrm_migrate_msgsize(), so
 * build_migrate() cannot fail (hence the BUG_ON).
 */
static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
			     const struct xfrm_migrate *m, int num_migrate,
			     const struct xfrm_kmaddress *k,
			     const struct xfrm_encap_tmpl *encap)
{
	struct net *net = &init_net;
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k, !!encap),
			GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	/* build migrate */
	err = build_migrate(skb, m, num_migrate, k, sel, encap, dir, type);
	BUG_ON(err < 0);

	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MIGRATE);
}
#else
/* Migration support compiled out. */
static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
			     const struct xfrm_migrate *m, int num_migrate,
			     const struct xfrm_kmaddress *k,
			     const struct xfrm_encap_tmpl *encap)
{
	return -ENOPROTOOPT;
}
#endif
3283
#define XMSGSIZE(type) sizeof(struct type)

/* Minimum (fixed header) payload length of each xfrm netlink message,
 * indexed by (message type - XFRM_MSG_BASE).  xfrm_user_rcv_msg() passes
 * this to nlmsg_parse_deprecated() so attribute parsing starts right
 * after the per-message header.
 */
const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
	[XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
	[XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
	[XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
	[XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
	[XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
	[XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
	[XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
	[XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
	[XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
	[XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
	[XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
	[XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
	[XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
	[XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
	[XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32),
	[XFRM_MSG_NEWSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
	[XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
	[XFRM_MSG_SETDEFAULT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_default),
	[XFRM_MSG_GETDEFAULT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_default),
};
EXPORT_SYMBOL_GPL(xfrm_msg_min);

#undef XMSGSIZE
3314
/* Netlink attribute validation policy for XFRMA_* attributes.  Strict
 * validation starts at XFRMA_SA_DIR (see the XFRMA_UNSPEC entry); older
 * attributes keep the historical lenient parsing for compatibility.
 */
const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
	[XFRMA_UNSPEC]		= { .strict_start_type = XFRMA_SA_DIR },
	[XFRMA_SA]		= { .len = sizeof(struct xfrm_usersa_info)},
	[XFRMA_POLICY]		= { .len = sizeof(struct xfrm_userpolicy_info)},
	[XFRMA_LASTUSED]	= { .type = NLA_U64},
	[XFRMA_ALG_AUTH_TRUNC]	= { .len = sizeof(struct xfrm_algo_auth)},
	[XFRMA_ALG_AEAD]	= { .len = sizeof(struct xfrm_algo_aead) },
	[XFRMA_ALG_AUTH]	= { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ALG_CRYPT]	= { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ALG_COMP]	= { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ENCAP]		= { .len = sizeof(struct xfrm_encap_tmpl) },
	[XFRMA_TMPL]		= { .len = sizeof(struct xfrm_user_tmpl) },
	[XFRMA_SEC_CTX]		= { .len = sizeof(struct xfrm_user_sec_ctx) },
	[XFRMA_LTIME_VAL]	= { .len = sizeof(struct xfrm_lifetime_cur) },
	[XFRMA_REPLAY_VAL]	= { .len = sizeof(struct xfrm_replay_state) },
	[XFRMA_REPLAY_THRESH]	= { .type = NLA_U32 },
	[XFRMA_ETIMER_THRESH]	= { .type = NLA_U32 },
	[XFRMA_SRCADDR]		= { .len = sizeof(xfrm_address_t) },
	[XFRMA_COADDR]		= { .len = sizeof(xfrm_address_t) },
	[XFRMA_POLICY_TYPE]	= { .len = sizeof(struct xfrm_userpolicy_type)},
	[XFRMA_MIGRATE]		= { .len = sizeof(struct xfrm_user_migrate) },
	[XFRMA_KMADDRESS]	= { .len = sizeof(struct xfrm_user_kmaddress) },
	[XFRMA_MARK]		= { .len = sizeof(struct xfrm_mark) },
	[XFRMA_TFCPAD]		= { .type = NLA_U32 },
	[XFRMA_REPLAY_ESN_VAL]	= { .len = sizeof(struct xfrm_replay_state_esn) },
	[XFRMA_SA_EXTRA_FLAGS]	= { .type = NLA_U32 },
	[XFRMA_PROTO]		= { .type = NLA_U8 },
	[XFRMA_ADDRESS_FILTER]	= { .len = sizeof(struct xfrm_address_filter) },
	[XFRMA_OFFLOAD_DEV]	= { .len = sizeof(struct xfrm_user_offload) },
	[XFRMA_SET_MARK]	= { .type = NLA_U32 },
	[XFRMA_SET_MARK_MASK]	= { .type = NLA_U32 },
	[XFRMA_IF_ID]		= { .type = NLA_U32 },
	[XFRMA_MTIMER_THRESH]	= { .type = NLA_U32 },
	[XFRMA_SA_DIR]          = NLA_POLICY_RANGE(NLA_U8, XFRM_SA_DIR_IN, XFRM_SA_DIR_OUT),
	[XFRMA_NAT_KEEPALIVE_INTERVAL] = { .type = NLA_U32 },
	[XFRMA_SA_PCPU]		= { .type = NLA_U32 },
	[XFRMA_IPTFS_DROP_TIME]		= { .type = NLA_U32 },
	[XFRMA_IPTFS_REORDER_WINDOW]	= { .type = NLA_U16 },
	[XFRMA_IPTFS_DONT_FRAG]		= { .type = NLA_FLAG },
	[XFRMA_IPTFS_INIT_DELAY]	= { .type = NLA_U32 },
	[XFRMA_IPTFS_MAX_QSIZE]		= { .type = NLA_U32 },
	[XFRMA_IPTFS_PKT_SIZE]		= { .type = NLA_U32 },
};
EXPORT_SYMBOL_GPL(xfrma_policy);
3359
/* Attribute policy for XFRM_MSG_NEWSPDINFO (SPD hash threshold tuning). */
static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
	[XFRMA_SPD_IPV4_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) },
	[XFRMA_SPD_IPV6_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) },
};
3364
/* Per-message-type dispatch table.  ->doit handles a single request;
 * ->start/->dump/->done drive NLM_F_DUMP iteration; ->nla_pol/->nla_max
 * override the default xfrma_policy/XFRMA_MAX for messages with their
 * own attribute namespace (currently only NEWSPDINFO).
 */
static const struct xfrm_link {
	int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **,
		    struct netlink_ext_ack *);
	int (*start)(struct netlink_callback *);
	int (*dump)(struct sk_buff *, struct netlink_callback *);
	int (*done)(struct netlink_callback *);
	const struct nla_policy *nla_pol;
	int nla_max;
} xfrm_dispatch[XFRM_NR_MSGTYPES] = {
	[XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
	[XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa },
	[XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
					     .dump = xfrm_dump_sa,
					     .done = xfrm_dump_sa_done },
	[XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
	/* DELPOLICY shares xfrm_get_policy(): it deletes when asked to */
	[XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
	[XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
						 .start = xfrm_dump_policy_start,
						 .dump = xfrm_dump_policy,
						 .done = xfrm_dump_policy_done },
	[XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
	[XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire },
	[XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
	[XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
	[XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
	[XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
	[XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa },
	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
	[XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae  },
	[XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae  },
	[XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate },
	[XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo },
	[XFRM_MSG_NEWSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_set_spdinfo,
						  .nla_pol = xfrma_spd_policy,
						  .nla_max = XFRMA_SPD_MAX },
	[XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo },
	[XFRM_MSG_SETDEFAULT - XFRM_MSG_BASE] = { .doit = xfrm_set_default   },
	[XFRM_MSG_GETDEFAULT - XFRM_MSG_BASE] = { .doit = xfrm_get_default   },
};
3404
xfrm_reject_unused_attr(int type,struct nlattr ** attrs,struct netlink_ext_ack * extack)3405 static int xfrm_reject_unused_attr(int type, struct nlattr **attrs,
3406 struct netlink_ext_ack *extack)
3407 {
3408 if (attrs[XFRMA_SA_DIR]) {
3409 switch (type) {
3410 case XFRM_MSG_NEWSA:
3411 case XFRM_MSG_UPDSA:
3412 case XFRM_MSG_ALLOCSPI:
3413 break;
3414 default:
3415 NL_SET_ERR_MSG(extack, "Invalid attribute SA_DIR");
3416 return -EINVAL;
3417 }
3418 }
3419
3420 if (attrs[XFRMA_SA_PCPU]) {
3421 switch (type) {
3422 case XFRM_MSG_NEWSA:
3423 case XFRM_MSG_UPDSA:
3424 case XFRM_MSG_ALLOCSPI:
3425 case XFRM_MSG_ACQUIRE:
3426
3427 break;
3428 default:
3429 NL_SET_ERR_MSG(extack, "Invalid attribute SA_PCPU");
3430 return -EINVAL;
3431 }
3432 }
3433
3434 return 0;
3435 }
3436
xfrm_user_rcv_msg(struct sk_buff * skb,struct nlmsghdr * nlh,struct netlink_ext_ack * extack)3437 static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
3438 struct netlink_ext_ack *extack)
3439 {
3440 struct net *net = sock_net(skb->sk);
3441 struct nlattr *attrs[XFRMA_MAX+1];
3442 const struct xfrm_link *link;
3443 struct nlmsghdr *nlh64 = NULL;
3444 int type, err;
3445
3446 type = nlh->nlmsg_type;
3447 if (type > XFRM_MSG_MAX)
3448 return -EINVAL;
3449
3450 type -= XFRM_MSG_BASE;
3451 link = &xfrm_dispatch[type];
3452
3453 /* All operations require privileges, even GET */
3454 if (!netlink_net_capable(skb, CAP_NET_ADMIN))
3455 return -EPERM;
3456
3457 if (in_compat_syscall()) {
3458 struct xfrm_translator *xtr = xfrm_get_translator();
3459
3460 if (!xtr)
3461 return -EOPNOTSUPP;
3462
3463 nlh64 = xtr->rcv_msg_compat(nlh, link->nla_max,
3464 link->nla_pol, extack);
3465 xfrm_put_translator(xtr);
3466 if (IS_ERR(nlh64))
3467 return PTR_ERR(nlh64);
3468 if (nlh64)
3469 nlh = nlh64;
3470 }
3471
3472 if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
3473 type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
3474 (nlh->nlmsg_flags & NLM_F_DUMP)) {
3475 struct netlink_dump_control c = {
3476 .start = link->start,
3477 .dump = link->dump,
3478 .done = link->done,
3479 };
3480
3481 if (link->dump == NULL) {
3482 err = -EINVAL;
3483 goto err;
3484 }
3485
3486 err = netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c);
3487 goto err;
3488 }
3489
3490 err = nlmsg_parse_deprecated(nlh, xfrm_msg_min[type], attrs,
3491 link->nla_max ? : XFRMA_MAX,
3492 link->nla_pol ? : xfrma_policy, extack);
3493 if (err < 0)
3494 goto err;
3495
3496 if (!link->nla_pol || link->nla_pol == xfrma_policy) {
3497 err = xfrm_reject_unused_attr((type + XFRM_MSG_BASE), attrs, extack);
3498 if (err < 0)
3499 goto err;
3500 }
3501
3502 if (link->doit == NULL) {
3503 err = -EINVAL;
3504 goto err;
3505 }
3506
3507 err = link->doit(skb, nlh, attrs, extack);
3508
3509 /* We need to free skb allocated in xfrm_alloc_compat() before
3510 * returning from this function, because consume_skb() won't take
3511 * care of frag_list since netlink destructor sets
3512 * sbk->head to NULL. (see netlink_skb_destructor())
3513 */
3514 if (skb_has_frag_list(skb)) {
3515 kfree_skb(skb_shinfo(skb)->frag_list);
3516 skb_shinfo(skb)->frag_list = NULL;
3517 }
3518
3519 err:
3520 kvfree(nlh64);
3521 return err;
3522 }
3523
xfrm_netlink_rcv(struct sk_buff * skb)3524 static void xfrm_netlink_rcv(struct sk_buff *skb)
3525 {
3526 struct net *net = sock_net(skb->sk);
3527
3528 mutex_lock(&net->xfrm.xfrm_cfg_mutex);
3529 netlink_rcv_skb(skb, &xfrm_user_rcv_msg);
3530 mutex_unlock(&net->xfrm.xfrm_cfg_mutex);
3531 }
3532
xfrm_expire_msgsize(void)3533 static inline unsigned int xfrm_expire_msgsize(void)
3534 {
3535 return NLMSG_ALIGN(sizeof(struct xfrm_user_expire)) +
3536 nla_total_size(sizeof(struct xfrm_mark)) +
3537 nla_total_size(sizeof_field(struct xfrm_state, dir)) +
3538 nla_total_size(4); /* XFRMA_SA_PCPU */
3539 }
3540
build_expire(struct sk_buff * skb,struct xfrm_state * x,const struct km_event * c)3541 static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
3542 {
3543 struct xfrm_user_expire *ue;
3544 struct nlmsghdr *nlh;
3545 int err;
3546
3547 nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
3548 if (nlh == NULL)
3549 return -EMSGSIZE;
3550
3551 ue = nlmsg_data(nlh);
3552 copy_to_user_state(x, &ue->state);
3553 ue->hard = (c->data.hard != 0) ? 1 : 0;
3554 /* clear the padding bytes */
3555 memset_after(ue, 0, hard);
3556
3557 err = xfrm_mark_put(skb, &x->mark);
3558 if (err)
3559 return err;
3560
3561 err = xfrm_if_id_put(skb, x->if_id);
3562 if (err)
3563 return err;
3564 if (x->pcpu_num != UINT_MAX) {
3565 err = nla_put_u32(skb, XFRMA_SA_PCPU, x->pcpu_num);
3566 if (err)
3567 return err;
3568 }
3569
3570 if (x->dir) {
3571 err = nla_put_u8(skb, XFRMA_SA_DIR, x->dir);
3572 if (err)
3573 return err;
3574 }
3575
3576 nlmsg_end(skb, nlh);
3577 return 0;
3578 }
3579
xfrm_exp_state_notify(struct xfrm_state * x,const struct km_event * c)3580 static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
3581 {
3582 struct net *net = xs_net(x);
3583 struct sk_buff *skb;
3584
3585 skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
3586 if (skb == NULL)
3587 return -ENOMEM;
3588
3589 if (build_expire(skb, x, c) < 0) {
3590 kfree_skb(skb);
3591 return -EMSGSIZE;
3592 }
3593
3594 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
3595 }
3596
xfrm_aevent_state_notify(struct xfrm_state * x,const struct km_event * c)3597 static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c)
3598 {
3599 struct net *net = xs_net(x);
3600 struct sk_buff *skb;
3601 int err;
3602
3603 skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
3604 if (skb == NULL)
3605 return -ENOMEM;
3606
3607 err = build_aevent(skb, x, c);
3608 BUG_ON(err < 0);
3609
3610 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_AEVENTS);
3611 }
3612
xfrm_notify_sa_flush(const struct km_event * c)3613 static int xfrm_notify_sa_flush(const struct km_event *c)
3614 {
3615 struct net *net = c->net;
3616 struct xfrm_usersa_flush *p;
3617 struct nlmsghdr *nlh;
3618 struct sk_buff *skb;
3619 int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush));
3620
3621 skb = nlmsg_new(len, GFP_ATOMIC);
3622 if (skb == NULL)
3623 return -ENOMEM;
3624
3625 nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
3626 if (nlh == NULL) {
3627 kfree_skb(skb);
3628 return -EMSGSIZE;
3629 }
3630
3631 p = nlmsg_data(nlh);
3632 p->proto = c->data.proto;
3633
3634 nlmsg_end(skb, nlh);
3635
3636 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);
3637 }
3638
xfrm_sa_len(struct xfrm_state * x)3639 static inline unsigned int xfrm_sa_len(struct xfrm_state *x)
3640 {
3641 unsigned int l = 0;
3642 if (x->aead)
3643 l += nla_total_size(aead_len(x->aead));
3644 if (x->aalg) {
3645 l += nla_total_size(sizeof(struct xfrm_algo) +
3646 (x->aalg->alg_key_len + 7) / 8);
3647 l += nla_total_size(xfrm_alg_auth_len(x->aalg));
3648 }
3649 if (x->ealg)
3650 l += nla_total_size(xfrm_alg_len(x->ealg));
3651 if (x->calg)
3652 l += nla_total_size(sizeof(*x->calg));
3653 if (x->encap)
3654 l += nla_total_size(sizeof(*x->encap));
3655 if (x->tfcpad)
3656 l += nla_total_size(sizeof(x->tfcpad));
3657 if (x->replay_esn)
3658 l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn));
3659 else
3660 l += nla_total_size(sizeof(struct xfrm_replay_state));
3661 if (x->security)
3662 l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
3663 x->security->ctx_len);
3664 if (x->coaddr)
3665 l += nla_total_size(sizeof(*x->coaddr));
3666 if (x->props.extra_flags)
3667 l += nla_total_size(sizeof(x->props.extra_flags));
3668 if (x->xso.dev)
3669 l += nla_total_size(sizeof(struct xfrm_user_offload));
3670 if (x->props.smark.v | x->props.smark.m) {
3671 l += nla_total_size(sizeof(x->props.smark.v));
3672 l += nla_total_size(sizeof(x->props.smark.m));
3673 }
3674 if (x->if_id)
3675 l += nla_total_size(sizeof(x->if_id));
3676 if (x->pcpu_num)
3677 l += nla_total_size(sizeof(x->pcpu_num));
3678
3679 /* Must count x->lastused as it may become non-zero behind our back. */
3680 l += nla_total_size_64bit(sizeof(u64));
3681
3682 if (x->mapping_maxage)
3683 l += nla_total_size(sizeof(x->mapping_maxage));
3684
3685 if (x->dir)
3686 l += nla_total_size(sizeof(x->dir));
3687
3688 if (x->nat_keepalive_interval)
3689 l += nla_total_size(sizeof(x->nat_keepalive_interval));
3690
3691 if (x->mode_cbs && x->mode_cbs->sa_len)
3692 l += x->mode_cbs->sa_len(x);
3693
3694 return l;
3695 }
3696
xfrm_notify_sa(struct xfrm_state * x,const struct km_event * c)3697 static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
3698 {
3699 struct net *net = xs_net(x);
3700 struct xfrm_usersa_info *p;
3701 struct xfrm_usersa_id *id;
3702 struct nlmsghdr *nlh;
3703 struct sk_buff *skb;
3704 unsigned int len = xfrm_sa_len(x);
3705 unsigned int headlen;
3706 int err;
3707
3708 headlen = sizeof(*p);
3709 if (c->event == XFRM_MSG_DELSA) {
3710 len += nla_total_size(headlen);
3711 headlen = sizeof(*id);
3712 len += nla_total_size(sizeof(struct xfrm_mark));
3713 }
3714 len += NLMSG_ALIGN(headlen);
3715
3716 skb = nlmsg_new(len, GFP_ATOMIC);
3717 if (skb == NULL)
3718 return -ENOMEM;
3719
3720 nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
3721 err = -EMSGSIZE;
3722 if (nlh == NULL)
3723 goto out_free_skb;
3724
3725 p = nlmsg_data(nlh);
3726 if (c->event == XFRM_MSG_DELSA) {
3727 struct nlattr *attr;
3728
3729 id = nlmsg_data(nlh);
3730 memset(id, 0, sizeof(*id));
3731 memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
3732 id->spi = x->id.spi;
3733 id->family = x->props.family;
3734 id->proto = x->id.proto;
3735
3736 attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
3737 err = -EMSGSIZE;
3738 if (attr == NULL)
3739 goto out_free_skb;
3740
3741 p = nla_data(attr);
3742 }
3743 err = copy_to_user_state_extra(x, p, skb);
3744 if (err)
3745 goto out_free_skb;
3746
3747 nlmsg_end(skb, nlh);
3748
3749 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);
3750
3751 out_free_skb:
3752 kfree_skb(skb);
3753 return err;
3754 }
3755
xfrm_send_state_notify(struct xfrm_state * x,const struct km_event * c)3756 static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c)
3757 {
3758
3759 switch (c->event) {
3760 case XFRM_MSG_EXPIRE:
3761 return xfrm_exp_state_notify(x, c);
3762 case XFRM_MSG_NEWAE:
3763 return xfrm_aevent_state_notify(x, c);
3764 case XFRM_MSG_DELSA:
3765 case XFRM_MSG_UPDSA:
3766 case XFRM_MSG_NEWSA:
3767 return xfrm_notify_sa(x, c);
3768 case XFRM_MSG_FLUSHSA:
3769 return xfrm_notify_sa_flush(c);
3770 default:
3771 printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n",
3772 c->event);
3773 break;
3774 }
3775
3776 return 0;
3777
3778 }
3779
xfrm_acquire_msgsize(struct xfrm_state * x,struct xfrm_policy * xp)3780 static inline unsigned int xfrm_acquire_msgsize(struct xfrm_state *x,
3781 struct xfrm_policy *xp)
3782 {
3783 return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
3784 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
3785 + nla_total_size(sizeof(struct xfrm_mark))
3786 + nla_total_size(xfrm_user_sec_ctx_size(x->security))
3787 + nla_total_size(4) /* XFRMA_SA_PCPU */
3788 + userpolicy_type_attrsize();
3789 }
3790
build_acquire(struct sk_buff * skb,struct xfrm_state * x,struct xfrm_tmpl * xt,struct xfrm_policy * xp)3791 static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
3792 struct xfrm_tmpl *xt, struct xfrm_policy *xp)
3793 {
3794 __u32 seq = xfrm_get_acqseq();
3795 struct xfrm_user_acquire *ua;
3796 struct nlmsghdr *nlh;
3797 int err;
3798
3799 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
3800 if (nlh == NULL)
3801 return -EMSGSIZE;
3802
3803 ua = nlmsg_data(nlh);
3804 memcpy(&ua->id, &x->id, sizeof(ua->id));
3805 memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
3806 memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
3807 copy_to_user_policy(xp, &ua->policy, XFRM_POLICY_OUT);
3808 ua->aalgos = xt->aalgos;
3809 ua->ealgos = xt->ealgos;
3810 ua->calgos = xt->calgos;
3811 ua->seq = x->km.seq = seq;
3812
3813 err = copy_to_user_tmpl(xp, skb);
3814 if (!err)
3815 err = copy_to_user_state_sec_ctx(x, skb);
3816 if (!err)
3817 err = copy_to_user_policy_type(xp->type, skb);
3818 if (!err)
3819 err = xfrm_mark_put(skb, &xp->mark);
3820 if (!err)
3821 err = xfrm_if_id_put(skb, xp->if_id);
3822 if (!err && xp->xdo.dev)
3823 err = copy_user_offload(&xp->xdo, skb);
3824 if (!err && x->pcpu_num != UINT_MAX)
3825 err = nla_put_u32(skb, XFRMA_SA_PCPU, x->pcpu_num);
3826 if (err) {
3827 nlmsg_cancel(skb, nlh);
3828 return err;
3829 }
3830
3831 nlmsg_end(skb, nlh);
3832 return 0;
3833 }
3834
xfrm_send_acquire(struct xfrm_state * x,struct xfrm_tmpl * xt,struct xfrm_policy * xp)3835 static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
3836 struct xfrm_policy *xp)
3837 {
3838 struct net *net = xs_net(x);
3839 struct sk_buff *skb;
3840 int err;
3841
3842 skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
3843 if (skb == NULL)
3844 return -ENOMEM;
3845
3846 err = build_acquire(skb, x, xt, xp);
3847 BUG_ON(err < 0);
3848
3849 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_ACQUIRE);
3850 }
3851
3852 /* User gives us xfrm_user_policy_info followed by an array of 0
3853 * or more templates.
3854 */
xfrm_compile_policy(struct sock * sk,int opt,u8 * data,int len,int * dir)3855 static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
3856 u8 *data, int len, int *dir)
3857 {
3858 struct net *net = sock_net(sk);
3859 struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
3860 struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
3861 struct xfrm_policy *xp;
3862 int nr;
3863
3864 switch (sk->sk_family) {
3865 case AF_INET:
3866 if (opt != IP_XFRM_POLICY) {
3867 *dir = -EOPNOTSUPP;
3868 return NULL;
3869 }
3870 break;
3871 #if IS_ENABLED(CONFIG_IPV6)
3872 case AF_INET6:
3873 if (opt != IPV6_XFRM_POLICY) {
3874 *dir = -EOPNOTSUPP;
3875 return NULL;
3876 }
3877 break;
3878 #endif
3879 default:
3880 *dir = -EINVAL;
3881 return NULL;
3882 }
3883
3884 *dir = -EINVAL;
3885
3886 if (len < sizeof(*p) ||
3887 verify_newpolicy_info(p, NULL))
3888 return NULL;
3889
3890 nr = ((len - sizeof(*p)) / sizeof(*ut));
3891 if (validate_tmpl(nr, ut, p->sel.family, p->dir, NULL))
3892 return NULL;
3893
3894 if (p->dir > XFRM_POLICY_OUT)
3895 return NULL;
3896
3897 xp = xfrm_policy_alloc(net, GFP_ATOMIC);
3898 if (xp == NULL) {
3899 *dir = -ENOBUFS;
3900 return NULL;
3901 }
3902
3903 copy_from_user_policy(xp, p);
3904 xp->type = XFRM_POLICY_TYPE_MAIN;
3905 copy_templates(xp, ut, nr);
3906
3907 *dir = p->dir;
3908
3909 return xp;
3910 }
3911
xfrm_polexpire_msgsize(struct xfrm_policy * xp)3912 static inline unsigned int xfrm_polexpire_msgsize(struct xfrm_policy *xp)
3913 {
3914 return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
3915 + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
3916 + nla_total_size(xfrm_user_sec_ctx_size(xp->security))
3917 + nla_total_size(sizeof(struct xfrm_mark))
3918 + userpolicy_type_attrsize();
3919 }
3920
build_polexpire(struct sk_buff * skb,struct xfrm_policy * xp,int dir,const struct km_event * c)3921 static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
3922 int dir, const struct km_event *c)
3923 {
3924 struct xfrm_user_polexpire *upe;
3925 int hard = c->data.hard;
3926 struct nlmsghdr *nlh;
3927 int err;
3928
3929 nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
3930 if (nlh == NULL)
3931 return -EMSGSIZE;
3932
3933 upe = nlmsg_data(nlh);
3934 copy_to_user_policy(xp, &upe->pol, dir);
3935 err = copy_to_user_tmpl(xp, skb);
3936 if (!err)
3937 err = copy_to_user_sec_ctx(xp, skb);
3938 if (!err)
3939 err = copy_to_user_policy_type(xp->type, skb);
3940 if (!err)
3941 err = xfrm_mark_put(skb, &xp->mark);
3942 if (!err)
3943 err = xfrm_if_id_put(skb, xp->if_id);
3944 if (!err && xp->xdo.dev)
3945 err = copy_user_offload(&xp->xdo, skb);
3946 if (err) {
3947 nlmsg_cancel(skb, nlh);
3948 return err;
3949 }
3950 upe->hard = !!hard;
3951
3952 nlmsg_end(skb, nlh);
3953 return 0;
3954 }
3955
xfrm_exp_policy_notify(struct xfrm_policy * xp,int dir,const struct km_event * c)3956 static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
3957 {
3958 struct net *net = xp_net(xp);
3959 struct sk_buff *skb;
3960 int err;
3961
3962 skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
3963 if (skb == NULL)
3964 return -ENOMEM;
3965
3966 err = build_polexpire(skb, xp, dir, c);
3967 BUG_ON(err < 0);
3968
3969 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
3970 }
3971
xfrm_notify_policy(struct xfrm_policy * xp,int dir,const struct km_event * c)3972 static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
3973 {
3974 unsigned int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
3975 struct net *net = xp_net(xp);
3976 struct xfrm_userpolicy_info *p;
3977 struct xfrm_userpolicy_id *id;
3978 struct nlmsghdr *nlh;
3979 struct sk_buff *skb;
3980 unsigned int headlen;
3981 int err;
3982
3983 headlen = sizeof(*p);
3984 if (c->event == XFRM_MSG_DELPOLICY) {
3985 len += nla_total_size(headlen);
3986 headlen = sizeof(*id);
3987 }
3988 len += userpolicy_type_attrsize();
3989 len += nla_total_size(sizeof(struct xfrm_mark));
3990 len += NLMSG_ALIGN(headlen);
3991
3992 skb = nlmsg_new(len, GFP_ATOMIC);
3993 if (skb == NULL)
3994 return -ENOMEM;
3995
3996 nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
3997 err = -EMSGSIZE;
3998 if (nlh == NULL)
3999 goto out_free_skb;
4000
4001 p = nlmsg_data(nlh);
4002 if (c->event == XFRM_MSG_DELPOLICY) {
4003 struct nlattr *attr;
4004
4005 id = nlmsg_data(nlh);
4006 memset(id, 0, sizeof(*id));
4007 id->dir = dir;
4008 if (c->data.byid)
4009 id->index = xp->index;
4010 else
4011 memcpy(&id->sel, &xp->selector, sizeof(id->sel));
4012
4013 attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
4014 err = -EMSGSIZE;
4015 if (attr == NULL)
4016 goto out_free_skb;
4017
4018 p = nla_data(attr);
4019 }
4020
4021 copy_to_user_policy(xp, p, dir);
4022 err = copy_to_user_tmpl(xp, skb);
4023 if (!err)
4024 err = copy_to_user_policy_type(xp->type, skb);
4025 if (!err)
4026 err = xfrm_mark_put(skb, &xp->mark);
4027 if (!err)
4028 err = xfrm_if_id_put(skb, xp->if_id);
4029 if (!err && xp->xdo.dev)
4030 err = copy_user_offload(&xp->xdo, skb);
4031 if (err)
4032 goto out_free_skb;
4033
4034 nlmsg_end(skb, nlh);
4035
4036 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
4037
4038 out_free_skb:
4039 kfree_skb(skb);
4040 return err;
4041 }
4042
xfrm_notify_policy_flush(const struct km_event * c)4043 static int xfrm_notify_policy_flush(const struct km_event *c)
4044 {
4045 struct net *net = c->net;
4046 struct nlmsghdr *nlh;
4047 struct sk_buff *skb;
4048 int err;
4049
4050 skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
4051 if (skb == NULL)
4052 return -ENOMEM;
4053
4054 nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
4055 err = -EMSGSIZE;
4056 if (nlh == NULL)
4057 goto out_free_skb;
4058 err = copy_to_user_policy_type(c->data.type, skb);
4059 if (err)
4060 goto out_free_skb;
4061
4062 nlmsg_end(skb, nlh);
4063
4064 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
4065
4066 out_free_skb:
4067 kfree_skb(skb);
4068 return err;
4069 }
4070
xfrm_send_policy_notify(struct xfrm_policy * xp,int dir,const struct km_event * c)4071 static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
4072 {
4073
4074 switch (c->event) {
4075 case XFRM_MSG_NEWPOLICY:
4076 case XFRM_MSG_UPDPOLICY:
4077 case XFRM_MSG_DELPOLICY:
4078 return xfrm_notify_policy(xp, dir, c);
4079 case XFRM_MSG_FLUSHPOLICY:
4080 return xfrm_notify_policy_flush(c);
4081 case XFRM_MSG_POLEXPIRE:
4082 return xfrm_exp_policy_notify(xp, dir, c);
4083 default:
4084 printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n",
4085 c->event);
4086 }
4087
4088 return 0;
4089
4090 }
4091
xfrm_report_msgsize(void)4092 static inline unsigned int xfrm_report_msgsize(void)
4093 {
4094 return NLMSG_ALIGN(sizeof(struct xfrm_user_report));
4095 }
4096
build_report(struct sk_buff * skb,u8 proto,struct xfrm_selector * sel,xfrm_address_t * addr)4097 static int build_report(struct sk_buff *skb, u8 proto,
4098 struct xfrm_selector *sel, xfrm_address_t *addr)
4099 {
4100 struct xfrm_user_report *ur;
4101 struct nlmsghdr *nlh;
4102
4103 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0);
4104 if (nlh == NULL)
4105 return -EMSGSIZE;
4106
4107 ur = nlmsg_data(nlh);
4108 ur->proto = proto;
4109 memcpy(&ur->sel, sel, sizeof(ur->sel));
4110
4111 if (addr) {
4112 int err = nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr);
4113 if (err) {
4114 nlmsg_cancel(skb, nlh);
4115 return err;
4116 }
4117 }
4118 nlmsg_end(skb, nlh);
4119 return 0;
4120 }
4121
xfrm_send_report(struct net * net,u8 proto,struct xfrm_selector * sel,xfrm_address_t * addr)4122 static int xfrm_send_report(struct net *net, u8 proto,
4123 struct xfrm_selector *sel, xfrm_address_t *addr)
4124 {
4125 struct sk_buff *skb;
4126 int err;
4127
4128 skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);
4129 if (skb == NULL)
4130 return -ENOMEM;
4131
4132 err = build_report(skb, proto, sel, addr);
4133 BUG_ON(err < 0);
4134
4135 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_REPORT);
4136 }
4137
xfrm_mapping_msgsize(void)4138 static inline unsigned int xfrm_mapping_msgsize(void)
4139 {
4140 return NLMSG_ALIGN(sizeof(struct xfrm_user_mapping));
4141 }
4142
build_mapping(struct sk_buff * skb,struct xfrm_state * x,xfrm_address_t * new_saddr,__be16 new_sport)4143 static int build_mapping(struct sk_buff *skb, struct xfrm_state *x,
4144 xfrm_address_t *new_saddr, __be16 new_sport)
4145 {
4146 struct xfrm_user_mapping *um;
4147 struct nlmsghdr *nlh;
4148
4149 nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MAPPING, sizeof(*um), 0);
4150 if (nlh == NULL)
4151 return -EMSGSIZE;
4152
4153 um = nlmsg_data(nlh);
4154
4155 memcpy(&um->id.daddr, &x->id.daddr, sizeof(um->id.daddr));
4156 um->id.spi = x->id.spi;
4157 um->id.family = x->props.family;
4158 um->id.proto = x->id.proto;
4159 memcpy(&um->new_saddr, new_saddr, sizeof(um->new_saddr));
4160 memcpy(&um->old_saddr, &x->props.saddr, sizeof(um->old_saddr));
4161 um->new_sport = new_sport;
4162 um->old_sport = x->encap->encap_sport;
4163 um->reqid = x->props.reqid;
4164
4165 nlmsg_end(skb, nlh);
4166 return 0;
4167 }
4168
xfrm_send_mapping(struct xfrm_state * x,xfrm_address_t * ipaddr,__be16 sport)4169 static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
4170 __be16 sport)
4171 {
4172 struct net *net = xs_net(x);
4173 struct sk_buff *skb;
4174 int err;
4175
4176 if (x->id.proto != IPPROTO_ESP)
4177 return -EINVAL;
4178
4179 if (!x->encap)
4180 return -EINVAL;
4181
4182 skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC);
4183 if (skb == NULL)
4184 return -ENOMEM;
4185
4186 err = build_mapping(skb, x, ipaddr, sport);
4187 BUG_ON(err < 0);
4188
4189 return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MAPPING);
4190 }
4191
xfrm_is_alive(const struct km_event * c)4192 static bool xfrm_is_alive(const struct km_event *c)
4193 {
4194 return (bool)xfrm_acquire_is_on(c->net);
4195 }
4196
/* Key-manager registration: hooks this netlink interface into the xfrm
 * core (registered in xfrm_user_init() via xfrm_register_km()).
 */
static struct xfrm_mgr netlink_mgr = {
	.notify		= xfrm_send_state_notify,
	.acquire	= xfrm_send_acquire,
	.compile_policy	= xfrm_compile_policy,
	.notify_policy	= xfrm_send_policy_notify,
	.report		= xfrm_send_report,
	.migrate	= xfrm_send_migrate,
	.new_mapping	= xfrm_send_mapping,
	.is_alive	= xfrm_is_alive,
};
4207
xfrm_user_net_init(struct net * net)4208 static int __net_init xfrm_user_net_init(struct net *net)
4209 {
4210 struct sock *nlsk;
4211 struct netlink_kernel_cfg cfg = {
4212 .groups = XFRMNLGRP_MAX,
4213 .input = xfrm_netlink_rcv,
4214 };
4215
4216 nlsk = netlink_kernel_create(net, NETLINK_XFRM, &cfg);
4217 if (nlsk == NULL)
4218 return -ENOMEM;
4219 net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
4220 rcu_assign_pointer(net->xfrm.nlsk, nlsk);
4221 return 0;
4222 }
4223
xfrm_user_net_pre_exit(struct net * net)4224 static void __net_exit xfrm_user_net_pre_exit(struct net *net)
4225 {
4226 RCU_INIT_POINTER(net->xfrm.nlsk, NULL);
4227 }
4228
xfrm_user_net_exit(struct list_head * net_exit_list)4229 static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
4230 {
4231 struct net *net;
4232
4233 list_for_each_entry(net, net_exit_list, exit_list)
4234 netlink_kernel_release(net->xfrm.nlsk_stash);
4235 }
4236
/* Per-network-namespace lifecycle hooks for the xfrm netlink socket. */
static struct pernet_operations xfrm_user_net_ops = {
	.init	    = xfrm_user_net_init,
	.pre_exit   = xfrm_user_net_pre_exit,
	.exit_batch = xfrm_user_net_exit,
};
4242
xfrm_user_init(void)4243 static int __init xfrm_user_init(void)
4244 {
4245 int rv;
4246
4247 printk(KERN_INFO "Initializing XFRM netlink socket\n");
4248
4249 rv = register_pernet_subsys(&xfrm_user_net_ops);
4250 if (rv < 0)
4251 return rv;
4252 xfrm_register_km(&netlink_mgr);
4253 return 0;
4254 }
4255
xfrm_user_exit(void)4256 static void __exit xfrm_user_exit(void)
4257 {
4258 xfrm_unregister_km(&netlink_mgr);
4259 unregister_pernet_subsys(&xfrm_user_net_ops);
4260 }
4261
4262 module_init(xfrm_user_init);
4263 module_exit(xfrm_user_exit);
4264 MODULE_DESCRIPTION("XFRM User interface");
4265 MODULE_LICENSE("GPL");
4266 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);
4267