xref: /linux/net/xfrm/xfrm_user.c (revision e6bf1f7aea4df918e5ee848d9f6b7ce63135b4be)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* xfrm_user.c: User interface to configure xfrm engine.
3  *
4  * Copyright (C) 2002 David S. Miller (davem@redhat.com)
5  *
6  * Changes:
7  *	Mitsuru KANDA @USAGI
8  * 	Kazunori MIYAZAWA @USAGI
9  * 	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
10  * 		IPv6 support
11  *
12  */
13 
14 #include <linux/compat.h>
15 #include <linux/crypto.h>
16 #include <linux/module.h>
17 #include <linux/kernel.h>
18 #include <linux/types.h>
19 #include <linux/slab.h>
20 #include <linux/socket.h>
21 #include <linux/string.h>
22 #include <linux/net.h>
23 #include <linux/skbuff.h>
24 #include <linux/pfkeyv2.h>
25 #include <linux/ipsec.h>
26 #include <linux/init.h>
27 #include <linux/security.h>
28 #include <net/sock.h>
29 #include <net/xfrm.h>
30 #include <net/netlink.h>
31 #include <net/ah.h>
32 #include <linux/uaccess.h>
33 #if IS_ENABLED(CONFIG_IPV6)
34 #include <linux/in6.h>
35 #endif
36 #include <linux/unaligned.h>
37 
38 static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type,
39 			  struct netlink_ext_ack *extack)
40 {
41 	struct nlattr *rt = attrs[type];
42 	struct xfrm_algo *algp;
43 
44 	if (!rt)
45 		return 0;
46 
47 	algp = nla_data(rt);
48 	if (nla_len(rt) < (int)xfrm_alg_len(algp)) {
49 		NL_SET_ERR_MSG(extack, "Invalid AUTH/CRYPT/COMP attribute length");
50 		return -EINVAL;
51 	}
52 
53 	switch (type) {
54 	case XFRMA_ALG_AUTH:
55 	case XFRMA_ALG_CRYPT:
56 	case XFRMA_ALG_COMP:
57 		break;
58 
59 	default:
60 		NL_SET_ERR_MSG(extack, "Invalid algorithm attribute type");
61 		return -EINVAL;
62 	}
63 
64 	algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
65 	return 0;
66 }
67 
68 static int verify_auth_trunc(struct nlattr **attrs,
69 			     struct netlink_ext_ack *extack)
70 {
71 	struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC];
72 	struct xfrm_algo_auth *algp;
73 
74 	if (!rt)
75 		return 0;
76 
77 	algp = nla_data(rt);
78 	if (nla_len(rt) < (int)xfrm_alg_auth_len(algp)) {
79 		NL_SET_ERR_MSG(extack, "Invalid AUTH_TRUNC attribute length");
80 		return -EINVAL;
81 	}
82 
83 	algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
84 	return 0;
85 }
86 
87 static int verify_aead(struct nlattr **attrs, struct netlink_ext_ack *extack)
88 {
89 	struct nlattr *rt = attrs[XFRMA_ALG_AEAD];
90 	struct xfrm_algo_aead *algp;
91 
92 	if (!rt)
93 		return 0;
94 
95 	algp = nla_data(rt);
96 	if (nla_len(rt) < (int)aead_len(algp)) {
97 		NL_SET_ERR_MSG(extack, "Invalid AEAD attribute length");
98 		return -EINVAL;
99 	}
100 
101 	algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
102 	return 0;
103 }
104 
105 static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type,
106 			   xfrm_address_t **addrp)
107 {
108 	struct nlattr *rt = attrs[type];
109 
110 	if (rt && addrp)
111 		*addrp = nla_data(rt);
112 }
113 
114 static inline int verify_sec_ctx_len(struct nlattr **attrs, struct netlink_ext_ack *extack)
115 {
116 	struct nlattr *rt = attrs[XFRMA_SEC_CTX];
117 	struct xfrm_user_sec_ctx *uctx;
118 
119 	if (!rt)
120 		return 0;
121 
122 	uctx = nla_data(rt);
123 	if (uctx->len > nla_len(rt) ||
124 	    uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len)) {
125 		NL_SET_ERR_MSG(extack, "Invalid security context length");
126 		return -EINVAL;
127 	}
128 
129 	return 0;
130 }
131 
132 static inline int verify_replay(struct xfrm_usersa_info *p,
133 				struct nlattr **attrs, u8 sa_dir,
134 				struct netlink_ext_ack *extack)
135 {
136 	struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
137 	struct xfrm_replay_state_esn *rs;
138 
139 	if (!rt) {
140 		if (p->flags & XFRM_STATE_ESN) {
141 			NL_SET_ERR_MSG(extack, "Missing required attribute for ESN");
142 			return -EINVAL;
143 		}
144 		return 0;
145 	}
146 
147 	rs = nla_data(rt);
148 
149 	if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8) {
150 		NL_SET_ERR_MSG(extack, "ESN bitmap length must be <= 128");
151 		return -EINVAL;
152 	}
153 
154 	if (nla_len(rt) < (int)xfrm_replay_state_esn_len(rs) &&
155 	    nla_len(rt) != sizeof(*rs)) {
156 		NL_SET_ERR_MSG(extack, "ESN attribute is too short to fit the full bitmap length");
157 		return -EINVAL;
158 	}
159 
160 	/* As only ESP and AH support ESN feature. */
161 	if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH)) {
162 		NL_SET_ERR_MSG(extack, "ESN only supported for ESP and AH");
163 		return -EINVAL;
164 	}
165 
166 	if (p->replay_window != 0) {
167 		NL_SET_ERR_MSG(extack, "ESN not compatible with legacy replay_window");
168 		return -EINVAL;
169 	}
170 
171 	if (sa_dir == XFRM_SA_DIR_OUT)  {
172 		if (rs->replay_window) {
173 			NL_SET_ERR_MSG(extack, "Replay window should be 0 for output SA");
174 			return -EINVAL;
175 		}
176 		if (rs->seq || rs->seq_hi) {
177 			NL_SET_ERR_MSG(extack,
178 				       "Replay seq and seq_hi should be 0 for output SA");
179 			return -EINVAL;
180 		}
181 		if (rs->bmp_len) {
182 			NL_SET_ERR_MSG(extack, "Replay bmp_len should 0 for output SA");
183 			return -EINVAL;
184 		}
185 	}
186 
187 	if (sa_dir == XFRM_SA_DIR_IN)  {
188 		if (rs->oseq || rs->oseq_hi) {
189 			NL_SET_ERR_MSG(extack,
190 				       "Replay oseq and oseq_hi should be 0 for input SA");
191 			return -EINVAL;
192 		}
193 	}
194 
195 	return 0;
196 }
197 
/* Top-level validation of an XFRM_MSG_NEWSA/UPDSA request before any state
 * is allocated.  Checks the address families, the selector prefix lengths,
 * the algorithm-attribute combinations allowed for each protocol, each
 * individual attribute's length, the mode, and the direction-specific
 * restrictions introduced with XFRMA_SA_DIR.
 *
 * Returns 0 if the request is acceptable, otherwise a negative errno
 * (-EINVAL or -EAFNOSUPPORT) with an extack message describing the problem.
 */
static int verify_newsa_info(struct xfrm_usersa_info *p,
			     struct nlattr **attrs,
			     struct netlink_ext_ack *extack)
{
	int err;
	/* Direction defaults to 0 (unspecified) when the attribute is absent. */
	u8 sa_dir = nla_get_u8_default(attrs[XFRMA_SA_DIR], 0);
	u16 family = p->sel.family;

	err = -EINVAL;
	switch (p->family) {
	case AF_INET:
		break;

	case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
		break;
#else
		err = -EAFNOSUPPORT;
		NL_SET_ERR_MSG(extack, "IPv6 support disabled");
		goto out;
#endif

	default:
		NL_SET_ERR_MSG(extack, "Invalid address family");
		goto out;
	}

	/* An empty selector inherits the state's family unless the state is
	 * explicitly marked family-agnostic.
	 */
	if (!family && !(p->flags & XFRM_STATE_AF_UNSPEC))
		family = p->family;

	switch (family) {
	case AF_UNSPEC:
		break;

	case AF_INET:
		if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32) {
			NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 32 for IPv4)");
			goto out;
		}

		break;

	case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
		if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128) {
			NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 128 for IPv6)");
			goto out;
		}

		break;
#else
		NL_SET_ERR_MSG(extack, "IPv6 support disabled");
		err = -EAFNOSUPPORT;
		goto out;
#endif

	default:
		NL_SET_ERR_MSG(extack, "Invalid address family in selector");
		goto out;
	}

	/* Per-protocol attribute requirements and exclusions. */
	err = -EINVAL;
	switch (p->id.proto) {
	case IPPROTO_AH:
		/* AH needs an auth algorithm and nothing else. */
		if (!attrs[XFRMA_ALG_AUTH]	&&
		    !attrs[XFRMA_ALG_AUTH_TRUNC]) {
			NL_SET_ERR_MSG(extack, "Missing required attribute for AH: AUTH_TRUNC or AUTH");
			goto out;
		}

		if (attrs[XFRMA_ALG_AEAD]	||
		    attrs[XFRMA_ALG_CRYPT]	||
		    attrs[XFRMA_ALG_COMP]	||
		    attrs[XFRMA_TFCPAD]) {
			NL_SET_ERR_MSG(extack, "Invalid attributes for AH: AEAD, CRYPT, COMP, TFCPAD");
			goto out;
		}
		break;

	case IPPROTO_ESP:
		if (attrs[XFRMA_ALG_COMP]) {
			NL_SET_ERR_MSG(extack, "Invalid attribute for ESP: COMP");
			goto out;
		}

		if (!attrs[XFRMA_ALG_AUTH] &&
		    !attrs[XFRMA_ALG_AUTH_TRUNC] &&
		    !attrs[XFRMA_ALG_CRYPT] &&
		    !attrs[XFRMA_ALG_AEAD]) {
			NL_SET_ERR_MSG(extack, "Missing required attribute for ESP: at least one of AUTH, AUTH_TRUNC, CRYPT, AEAD");
			goto out;
		}

		/* AEAD provides both encryption and authentication, so it is
		 * mutually exclusive with the separate algorithms.
		 */
		if ((attrs[XFRMA_ALG_AUTH] ||
		     attrs[XFRMA_ALG_AUTH_TRUNC] ||
		     attrs[XFRMA_ALG_CRYPT]) &&
		    attrs[XFRMA_ALG_AEAD]) {
			NL_SET_ERR_MSG(extack, "Invalid attribute combination for ESP: AEAD can't be used with AUTH, AUTH_TRUNC, CRYPT");
			goto out;
		}

		if (attrs[XFRMA_TFCPAD] &&
		    p->mode != XFRM_MODE_TUNNEL) {
			NL_SET_ERR_MSG(extack, "TFC padding can only be used in tunnel mode");
			goto out;
		}
		break;

	case IPPROTO_COMP:
		if (!attrs[XFRMA_ALG_COMP]) {
			NL_SET_ERR_MSG(extack, "Missing required attribute for COMP: COMP");
			goto out;
		}

		if (attrs[XFRMA_ALG_AEAD]	||
		    attrs[XFRMA_ALG_AUTH]	||
		    attrs[XFRMA_ALG_AUTH_TRUNC]	||
		    attrs[XFRMA_ALG_CRYPT]	||
		    attrs[XFRMA_TFCPAD]) {
			NL_SET_ERR_MSG(extack, "Invalid attributes for COMP: AEAD, AUTH, AUTH_TRUNC, CRYPT, TFCPAD");
			goto out;
		}

		/* IPcomp CPI values are 16-bit. */
		if (ntohl(p->id.spi) >= 0x10000) {
			NL_SET_ERR_MSG(extack, "SPI is too large for COMP (must be < 0x10000)");
			goto out;
		}
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case IPPROTO_DSTOPTS:
	case IPPROTO_ROUTING:
		/* MIPv6 routing/dstopts states take no crypto at all, only a
		 * care-of address.
		 */
		if (attrs[XFRMA_ALG_COMP]	||
		    attrs[XFRMA_ALG_AUTH]	||
		    attrs[XFRMA_ALG_AUTH_TRUNC]	||
		    attrs[XFRMA_ALG_AEAD]	||
		    attrs[XFRMA_ALG_CRYPT]	||
		    attrs[XFRMA_ENCAP]		||
		    attrs[XFRMA_SEC_CTX]	||
		    attrs[XFRMA_TFCPAD]) {
			NL_SET_ERR_MSG(extack, "Invalid attributes for DSTOPTS/ROUTING");
			goto out;
		}

		if (!attrs[XFRMA_COADDR]) {
			NL_SET_ERR_MSG(extack, "Missing required COADDR attribute for DSTOPTS/ROUTING");
			goto out;
		}
		break;
#endif

	default:
		NL_SET_ERR_MSG(extack, "Unsupported protocol");
		goto out;
	}

	/* Length/consistency validation of the individual attributes. */
	if ((err = verify_aead(attrs, extack)))
		goto out;
	if ((err = verify_auth_trunc(attrs, extack)))
		goto out;
	if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH, extack)))
		goto out;
	if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT, extack)))
		goto out;
	if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP, extack)))
		goto out;
	if ((err = verify_sec_ctx_len(attrs, extack)))
		goto out;
	if ((err = verify_replay(p, attrs, sa_dir, extack)))
		goto out;

	err = -EINVAL;
	switch (p->mode) {
	case XFRM_MODE_TRANSPORT:
	case XFRM_MODE_TUNNEL:
	case XFRM_MODE_ROUTEOPTIMIZATION:
	case XFRM_MODE_BEET:
		break;

	default:
		NL_SET_ERR_MSG(extack, "Unsupported mode");
		goto out;
	}

	err = 0;

	if (attrs[XFRMA_MTIMER_THRESH]) {
		if (!attrs[XFRMA_ENCAP]) {
			NL_SET_ERR_MSG(extack, "MTIMER_THRESH attribute can only be set on ENCAP states");
			err = -EINVAL;
			goto out;
		}

		if (sa_dir == XFRM_SA_DIR_OUT) {
			NL_SET_ERR_MSG(extack,
				       "MTIMER_THRESH attribute should not be set on output SA");
			err = -EINVAL;
			goto out;
		}
	}

	/* Direction-specific restrictions: output SAs must not carry
	 * receive-side flags or replay state ...
	 */
	if (sa_dir == XFRM_SA_DIR_OUT) {
		if (p->flags & XFRM_STATE_DECAP_DSCP) {
			NL_SET_ERR_MSG(extack, "Flag DECAP_DSCP should not be set for output SA");
			err = -EINVAL;
			goto out;
		}

		if (p->flags & XFRM_STATE_ICMP) {
			NL_SET_ERR_MSG(extack, "Flag ICMP should not be set for output SA");
			err = -EINVAL;
			goto out;
		}

		if (p->flags & XFRM_STATE_WILDRECV) {
			NL_SET_ERR_MSG(extack, "Flag WILDRECV should not be set for output SA");
			err = -EINVAL;
			goto out;
		}

		if (p->replay_window) {
			NL_SET_ERR_MSG(extack, "Replay window should be 0 for output SA");
			err = -EINVAL;
			goto out;
		}

		if (attrs[XFRMA_REPLAY_VAL]) {
			struct xfrm_replay_state *replay;

			replay = nla_data(attrs[XFRMA_REPLAY_VAL]);

			if (replay->seq || replay->bitmap) {
				NL_SET_ERR_MSG(extack,
					       "Replay seq and bitmap should be 0 for output SA");
				err = -EINVAL;
				goto out;
			}
		}
	}

	/* ... and input SAs must not carry transmit-side flags. */
	if (sa_dir == XFRM_SA_DIR_IN) {
		if (p->flags & XFRM_STATE_NOPMTUDISC) {
			NL_SET_ERR_MSG(extack, "Flag NOPMTUDISC should not be set for input SA");
			err = -EINVAL;
			goto out;
		}

		if (attrs[XFRMA_SA_EXTRA_FLAGS]) {
			u32 xflags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]);

			if (xflags & XFRM_SA_XFLAG_DONT_ENCAP_DSCP) {
				NL_SET_ERR_MSG(extack, "Flag DONT_ENCAP_DSCP should not be set for input SA");
				err = -EINVAL;
				goto out;
			}

			if (xflags & XFRM_SA_XFLAG_OSEQ_MAY_WRAP) {
				NL_SET_ERR_MSG(extack, "Flag OSEQ_MAY_WRAP should not be set for input SA");
				err = -EINVAL;
				goto out;
			}

		}
	}

out:
	return err;
}
466 
467 static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
468 			   struct xfrm_algo_desc *(*get_byname)(const char *, int),
469 			   struct nlattr *rta, struct netlink_ext_ack *extack)
470 {
471 	struct xfrm_algo *p, *ualg;
472 	struct xfrm_algo_desc *algo;
473 
474 	if (!rta)
475 		return 0;
476 
477 	ualg = nla_data(rta);
478 
479 	algo = get_byname(ualg->alg_name, 1);
480 	if (!algo) {
481 		NL_SET_ERR_MSG(extack, "Requested COMP algorithm not found");
482 		return -ENOSYS;
483 	}
484 	*props = algo->desc.sadb_alg_id;
485 
486 	p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
487 	if (!p)
488 		return -ENOMEM;
489 
490 	strcpy(p->alg_name, algo->name);
491 	*algpp = p;
492 	return 0;
493 }
494 
495 static int attach_crypt(struct xfrm_state *x, struct nlattr *rta,
496 			struct netlink_ext_ack *extack)
497 {
498 	struct xfrm_algo *p, *ualg;
499 	struct xfrm_algo_desc *algo;
500 
501 	if (!rta)
502 		return 0;
503 
504 	ualg = nla_data(rta);
505 
506 	algo = xfrm_ealg_get_byname(ualg->alg_name, 1);
507 	if (!algo) {
508 		NL_SET_ERR_MSG(extack, "Requested CRYPT algorithm not found");
509 		return -ENOSYS;
510 	}
511 	x->props.ealgo = algo->desc.sadb_alg_id;
512 
513 	p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
514 	if (!p)
515 		return -ENOMEM;
516 
517 	strcpy(p->alg_name, algo->name);
518 	x->ealg = p;
519 	x->geniv = algo->uinfo.encr.geniv;
520 	return 0;
521 }
522 
523 static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props,
524 		       struct nlattr *rta, struct netlink_ext_ack *extack)
525 {
526 	struct xfrm_algo *ualg;
527 	struct xfrm_algo_auth *p;
528 	struct xfrm_algo_desc *algo;
529 
530 	if (!rta)
531 		return 0;
532 
533 	ualg = nla_data(rta);
534 
535 	algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
536 	if (!algo) {
537 		NL_SET_ERR_MSG(extack, "Requested AUTH algorithm not found");
538 		return -ENOSYS;
539 	}
540 	*props = algo->desc.sadb_alg_id;
541 
542 	p = kmalloc(sizeof(*p) + (ualg->alg_key_len + 7) / 8, GFP_KERNEL);
543 	if (!p)
544 		return -ENOMEM;
545 
546 	strcpy(p->alg_name, algo->name);
547 	p->alg_key_len = ualg->alg_key_len;
548 	p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
549 	memcpy(p->alg_key, ualg->alg_key, (ualg->alg_key_len + 7) / 8);
550 
551 	*algpp = p;
552 	return 0;
553 }
554 
555 static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
556 			     struct nlattr *rta, struct netlink_ext_ack *extack)
557 {
558 	struct xfrm_algo_auth *p, *ualg;
559 	struct xfrm_algo_desc *algo;
560 
561 	if (!rta)
562 		return 0;
563 
564 	ualg = nla_data(rta);
565 
566 	algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
567 	if (!algo) {
568 		NL_SET_ERR_MSG(extack, "Requested AUTH_TRUNC algorithm not found");
569 		return -ENOSYS;
570 	}
571 	if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits) {
572 		NL_SET_ERR_MSG(extack, "Invalid length requested for truncated ICV");
573 		return -EINVAL;
574 	}
575 	*props = algo->desc.sadb_alg_id;
576 
577 	p = kmemdup(ualg, xfrm_alg_auth_len(ualg), GFP_KERNEL);
578 	if (!p)
579 		return -ENOMEM;
580 
581 	strcpy(p->alg_name, algo->name);
582 	if (!p->alg_trunc_len)
583 		p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
584 
585 	*algpp = p;
586 	return 0;
587 }
588 
589 static int attach_aead(struct xfrm_state *x, struct nlattr *rta,
590 		       struct netlink_ext_ack *extack)
591 {
592 	struct xfrm_algo_aead *p, *ualg;
593 	struct xfrm_algo_desc *algo;
594 
595 	if (!rta)
596 		return 0;
597 
598 	ualg = nla_data(rta);
599 
600 	algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1);
601 	if (!algo) {
602 		NL_SET_ERR_MSG(extack, "Requested AEAD algorithm not found");
603 		return -ENOSYS;
604 	}
605 	x->props.ealgo = algo->desc.sadb_alg_id;
606 
607 	p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL);
608 	if (!p)
609 		return -ENOMEM;
610 
611 	strcpy(p->alg_name, algo->name);
612 	x->aead = p;
613 	x->geniv = algo->uinfo.aead.geniv;
614 	return 0;
615 }
616 
/* Validate an ESN replay attribute sent for an *existing* SA (update path):
 * the incoming structure must match the size and bitmap length that were
 * allocated for the SA, and the requested replay window must fit inside
 * the bitmap.  Returns 0 on success, -EINVAL with an extack message
 * otherwise.  A missing attribute or a non-ESN SA is accepted as a no-op.
 */
static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn,
					 struct nlattr *rp,
					 struct netlink_ext_ack *extack)
{
	struct xfrm_replay_state_esn *up;
	unsigned int ulen;

	if (!replay_esn || !rp)
		return 0;

	up = nla_data(rp);
	ulen = xfrm_replay_state_esn_len(up);

	/* Check the overall length and the internal bitmap length to avoid
	 * potential overflow. */
	if (nla_len(rp) < (int)ulen) {
		NL_SET_ERR_MSG(extack, "ESN attribute is too short");
		return -EINVAL;
	}

	if (xfrm_replay_state_esn_len(replay_esn) != ulen) {
		NL_SET_ERR_MSG(extack, "New ESN size doesn't match the existing SA's ESN size");
		return -EINVAL;
	}

	if (replay_esn->bmp_len != up->bmp_len) {
		NL_SET_ERR_MSG(extack, "New ESN bitmap size doesn't match the existing SA's ESN bitmap");
		return -EINVAL;
	}

	/* The window is expressed in bits; the bitmap holds bmp_len u32s. */
	if (up->replay_window > up->bmp_len * sizeof(__u32) * 8) {
		NL_SET_ERR_MSG(extack, "ESN replay window is longer than the bitmap");
		return -EINVAL;
	}

	return 0;
}
654 
/* Allocate the SA's replay and pre-replay ESN state from the userspace
 * attribute.  Both buffers are sized for the full declared bitmap (klen);
 * if userspace sent only the bare header, just the header is copied (ulen)
 * and the bitmap stays zeroed from kzalloc.  Returns 0 or -ENOMEM; a
 * missing attribute is a no-op.  Caller has already validated sizes via
 * verify_replay().
 */
static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn,
				       struct xfrm_replay_state_esn **preplay_esn,
				       struct nlattr *rta)
{
	struct xfrm_replay_state_esn *p, *pp, *up;
	unsigned int klen, ulen;

	if (!rta)
		return 0;

	up = nla_data(rta);
	klen = xfrm_replay_state_esn_len(up);
	/* Copy the full structure if present, otherwise only the header. */
	ulen = nla_len(rta) >= (int)klen ? klen : sizeof(*up);

	p = kzalloc(klen, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	pp = kzalloc(klen, GFP_KERNEL);
	if (!pp) {
		kfree(p);
		return -ENOMEM;
	}

	memcpy(p, up, ulen);
	memcpy(pp, up, ulen);

	*replay_esn = p;
	*preplay_esn = pp;

	return 0;
}
687 
688 static inline unsigned int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
689 {
690 	unsigned int len = 0;
691 
692 	if (xfrm_ctx) {
693 		len += sizeof(struct xfrm_user_sec_ctx);
694 		len += xfrm_ctx->ctx_len;
695 	}
696 	return len;
697 }
698 
/* Populate a freshly allocated xfrm_state from the userspace SA
 * description.  Only direct field copies; algorithm and optional-attribute
 * handling happens later in xfrm_state_construct().
 */
static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
	memcpy(&x->id, &p->id, sizeof(x->id));
	memcpy(&x->sel, &p->sel, sizeof(x->sel));
	memcpy(&x->lft, &p->lft, sizeof(x->lft));
	x->props.mode = p->mode;
	/* Clamp the legacy window to what the in-kernel bitmap can track. */
	x->props.replay_window = min_t(unsigned int, p->replay_window,
					sizeof(x->replay.bitmap) * 8);
	x->props.reqid = p->reqid;
	x->props.family = p->family;
	memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
	x->props.flags = p->flags;

	/* An empty selector inherits the SA's family unless explicitly
	 * marked family-agnostic (used by socket policies).
	 */
	if (!x->sel.family && !(p->flags & XFRM_STATE_AF_UNSPEC))
		x->sel.family = p->family;
}
715 
716 /*
717  * someday when pfkey also has support, we could have the code
718  * somehow made shareable and move it to xfrm_state.c - JHS
719  *
720 */
/* Apply AE (async event) attributes to state @x: replay counters (legacy
 * and, when @update_esn is set, ESN), current lifetime usage, and the
 * event timers/thresholds.  Attribute sizes must already have been
 * validated by the caller; this function copies unconditionally.
 */
static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
				  int update_esn)
{
	struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
	struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL;
	struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
	struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
	struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
	struct nlattr *mt = attrs[XFRMA_MTIMER_THRESH];

	/* ESN replay state: only when the SA was set up with ESN buffers. */
	if (re && x->replay_esn && x->preplay_esn) {
		struct xfrm_replay_state_esn *replay_esn;
		replay_esn = nla_data(re);
		memcpy(x->replay_esn, replay_esn,
		       xfrm_replay_state_esn_len(replay_esn));
		memcpy(x->preplay_esn, replay_esn,
		       xfrm_replay_state_esn_len(replay_esn));
	}

	/* Legacy (non-ESN) replay state. */
	if (rp) {
		struct xfrm_replay_state *replay;
		replay = nla_data(rp);
		memcpy(&x->replay, replay, sizeof(*replay));
		memcpy(&x->preplay, replay, sizeof(*replay));
	}

	/* Current lifetime accounting (bytes/packets/timestamps). */
	if (lt) {
		struct xfrm_lifetime_cur *ltime;
		ltime = nla_data(lt);
		x->curlft.bytes = ltime->bytes;
		x->curlft.packets = ltime->packets;
		x->curlft.add_time = ltime->add_time;
		x->curlft.use_time = ltime->use_time;
	}

	if (et)
		x->replay_maxage = nla_get_u32(et);

	if (rt)
		x->replay_maxdiff = nla_get_u32(rt);

	if (mt)
		x->mapping_maxage = nla_get_u32(mt);
}
765 
766 static void xfrm_smark_init(struct nlattr **attrs, struct xfrm_mark *m)
767 {
768 	if (attrs[XFRMA_SET_MARK]) {
769 		m->v = nla_get_u32(attrs[XFRMA_SET_MARK]);
770 		m->m = nla_get_u32_default(attrs[XFRMA_SET_MARK_MASK],
771 					   0xffffffff);
772 	} else {
773 		m->v = m->m = 0;
774 	}
775 }
776 
/* Build a fully initialized xfrm_state from an already-verified
 * XFRM_MSG_NEWSA/UPDSA request.  On success returns the new state with one
 * reference held; on failure returns NULL, stores the errno in *errp, and
 * releases everything allocated so far via xfrm_state_put() on the
 * DEAD-marked state.  Ordering matters: algorithms must be attached before
 * __xfrm_init_state(), and AE params are applied last to override the
 * sysctl defaults set just above.
 */
static struct xfrm_state *xfrm_state_construct(struct net *net,
					       struct xfrm_usersa_info *p,
					       struct nlattr **attrs,
					       int *errp,
					       struct netlink_ext_ack *extack)
{
	struct xfrm_state *x = xfrm_state_alloc(net);
	int err = -ENOMEM;

	if (!x)
		goto error_no_put;

	copy_from_user_state(x, p);

	if (attrs[XFRMA_ENCAP]) {
		x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
				   sizeof(*x->encap), GFP_KERNEL);
		if (x->encap == NULL)
			goto error;
	}

	if (attrs[XFRMA_COADDR]) {
		x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
				    sizeof(*x->coaddr), GFP_KERNEL);
		if (x->coaddr == NULL)
			goto error;
	}

	if (attrs[XFRMA_SA_EXTRA_FLAGS])
		x->props.extra_flags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]);

	if ((err = attach_aead(x, attrs[XFRMA_ALG_AEAD], extack)))
		goto error;
	if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo,
				     attrs[XFRMA_ALG_AUTH_TRUNC], extack)))
		goto error;
	/* Legacy AUTH is only consulted when AUTH_TRUNC didn't set aalgo. */
	if (!x->props.aalgo) {
		if ((err = attach_auth(&x->aalg, &x->props.aalgo,
				       attrs[XFRMA_ALG_AUTH], extack)))
			goto error;
	}
	if ((err = attach_crypt(x, attrs[XFRMA_ALG_CRYPT], extack)))
		goto error;
	if ((err = attach_one_algo(&x->calg, &x->props.calgo,
				   xfrm_calg_get_byname,
				   attrs[XFRMA_ALG_COMP], extack)))
		goto error;

	if (attrs[XFRMA_TFCPAD])
		x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]);

	xfrm_mark_get(attrs, &x->mark);

	xfrm_smark_init(attrs, &x->props.smark);

	if (attrs[XFRMA_IF_ID])
		x->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	if (attrs[XFRMA_SA_DIR])
		x->dir = nla_get_u8(attrs[XFRMA_SA_DIR]);

	if (attrs[XFRMA_NAT_KEEPALIVE_INTERVAL])
		x->nat_keepalive_interval =
			nla_get_u32(attrs[XFRMA_NAT_KEEPALIVE_INTERVAL]);

	err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV], extack);
	if (err)
		goto error;

	if (attrs[XFRMA_SEC_CTX]) {
		err = security_xfrm_state_alloc(x,
						nla_data(attrs[XFRMA_SEC_CTX]));
		if (err)
			goto error;
	}

	if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
					       attrs[XFRMA_REPLAY_ESN_VAL])))
		goto error;

	x->km.seq = p->seq;
	x->replay_maxdiff = net->xfrm.sysctl_aevent_rseqth;
	/* sysctl_xfrm_aevent_etime is in 100ms units */
	x->replay_maxage = (net->xfrm.sysctl_aevent_etime*HZ)/XFRM_AE_ETH_M;

	if ((err = xfrm_init_replay(x, extack)))
		goto error;

	/* override default values from above */
	xfrm_update_ae_params(x, attrs, 0);

	/* configure the hardware if offload is requested */
	if (attrs[XFRMA_OFFLOAD_DEV]) {
		err = xfrm_dev_state_add(net, x,
					 nla_data(attrs[XFRMA_OFFLOAD_DEV]),
					 extack);
		if (err)
			goto error;
	}

	return x;

error:
	/* Mark DEAD so the final put frees rather than re-inserts. */
	x->km.state = XFRM_STATE_DEAD;
	xfrm_state_put(x);
error_no_put:
	*errp = err;
	return NULL;
}
886 
/* Netlink handler for XFRM_MSG_NEWSA and XFRM_MSG_UPDSA: validate the
 * request, construct the state, insert (or update) it, audit the result,
 * and broadcast a km state notification on success.  Returns 0 or a
 * negative errno.
 */
static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_usersa_info *p = nlmsg_data(nlh);
	struct xfrm_state *x;
	int err;
	struct km_event c;

	err = verify_newsa_info(p, attrs, extack);
	if (err)
		return err;

	x = xfrm_state_construct(net, p, attrs, &err, extack);
	if (!x)
		return err;

	/* Extra ref so the state survives until the final put below even
	 * if add/update fails and drops its reference.
	 */
	xfrm_state_hold(x);
	if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
		err = xfrm_state_add(x);
	else
		err = xfrm_state_update(x);

	xfrm_audit_state_add(x, err ? 0 : 1, true);

	if (err < 0) {
		x->km.state = XFRM_STATE_DEAD;
		xfrm_dev_state_delete(x);
		__xfrm_state_put(x);
		goto out;
	}

	if (x->km.state == XFRM_STATE_VOID)
		x->km.state = XFRM_STATE_VALID;

	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.event = nlh->nlmsg_type;

	km_state_notify(x, &c);
out:
	xfrm_state_put(x);
	return err;
}
931 
932 static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
933 						 struct xfrm_usersa_id *p,
934 						 struct nlattr **attrs,
935 						 int *errp)
936 {
937 	struct xfrm_state *x = NULL;
938 	struct xfrm_mark m;
939 	int err;
940 	u32 mark = xfrm_mark_get(attrs, &m);
941 
942 	if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
943 		err = -ESRCH;
944 		x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family);
945 	} else {
946 		xfrm_address_t *saddr = NULL;
947 
948 		verify_one_addr(attrs, XFRMA_SRCADDR, &saddr);
949 		if (!saddr) {
950 			err = -EINVAL;
951 			goto out;
952 		}
953 
954 		err = -ESRCH;
955 		x = xfrm_state_lookup_byaddr(net, mark,
956 					     &p->daddr, saddr,
957 					     p->proto, p->family);
958 	}
959 
960  out:
961 	if (!x && errp)
962 		*errp = err;
963 	return x;
964 }
965 
/* Netlink handler for XFRM_MSG_DELSA: look up the SA, check the LSM allows
 * deletion, refuse SAs owned by kernel tunnels, delete, audit, and notify.
 * The audit call runs on every exit path once the SA was found; the
 * reference from the lookup is always dropped.
 */
static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	int err = -ESRCH;
	struct km_event c;
	struct xfrm_usersa_id *p = nlmsg_data(nlh);

	x = xfrm_user_state_lookup(net, p, attrs, &err);
	if (x == NULL)
		return err;

	if ((err = security_xfrm_state_delete(x)) != 0)
		goto out;

	/* SAs created by the kernel for tunnels may not be deleted from
	 * userspace.
	 */
	if (xfrm_state_kern(x)) {
		NL_SET_ERR_MSG(extack, "SA is in use by tunnels");
		err = -EPERM;
		goto out;
	}

	err = xfrm_state_delete(x);
	if (err < 0)
		goto out;

	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.event = nlh->nlmsg_type;
	km_state_notify(x, &c);

out:
	xfrm_audit_state_delete(x, err ? 0 : 1, true);
	xfrm_state_put(x);
	return err;
}
1002 
/* Fill a userspace struct xfrm_usersa_info from kernel state @x.  The
 * destination is zeroed first so no kernel memory leaks through padding.
 * Stats fields use put_unaligned because the uapi struct layout does not
 * guarantee their alignment on all architectures.
 */
static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
	memset(p, 0, sizeof(*p));
	memcpy(&p->id, &x->id, sizeof(p->id));
	memcpy(&p->sel, &x->sel, sizeof(p->sel));
	memcpy(&p->lft, &x->lft, sizeof(p->lft));
	/* Pull fresh counters from the offload device before reporting. */
	if (x->xso.dev)
		xfrm_dev_state_update_stats(x);
	memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
	put_unaligned(x->stats.replay_window, &p->stats.replay_window);
	put_unaligned(x->stats.replay, &p->stats.replay);
	put_unaligned(x->stats.integrity_failed, &p->stats.integrity_failed);
	memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
	p->mode = x->props.mode;
	p->replay_window = x->props.replay_window;
	p->reqid = x->props.reqid;
	p->family = x->props.family;
	p->flags = x->props.flags;
	p->seq = x->km.seq;
}
1023 
/* Context passed through the netlink dump callbacks. */
struct xfrm_dump_info {
	struct sk_buff *in_skb;		/* request skb (for portid lookup) */
	struct sk_buff *out_skb;	/* skb being filled with dump data */
	u32 nlmsg_seq;			/* sequence number to echo back */
	u16 nlmsg_flags;		/* flags for the emitted messages */
};
1030 
1031 static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
1032 {
1033 	struct xfrm_user_sec_ctx *uctx;
1034 	struct nlattr *attr;
1035 	int ctx_size = sizeof(*uctx) + s->ctx_len;
1036 
1037 	attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size);
1038 	if (attr == NULL)
1039 		return -EMSGSIZE;
1040 
1041 	uctx = nla_data(attr);
1042 	uctx->exttype = XFRMA_SEC_CTX;
1043 	uctx->len = ctx_size;
1044 	uctx->ctx_doi = s->ctx_doi;
1045 	uctx->ctx_alg = s->ctx_alg;
1046 	uctx->ctx_len = s->ctx_len;
1047 	memcpy(uctx + 1, s->ctx_str, s->ctx_len);
1048 
1049 	return 0;
1050 }
1051 
1052 static int copy_user_offload(struct xfrm_dev_offload *xso, struct sk_buff *skb)
1053 {
1054 	struct xfrm_user_offload *xuo;
1055 	struct nlattr *attr;
1056 
1057 	attr = nla_reserve(skb, XFRMA_OFFLOAD_DEV, sizeof(*xuo));
1058 	if (attr == NULL)
1059 		return -EMSGSIZE;
1060 
1061 	xuo = nla_data(attr);
1062 	memset(xuo, 0, sizeof(*xuo));
1063 	xuo->ifindex = xso->dev->ifindex;
1064 	if (xso->dir == XFRM_DEV_OFFLOAD_IN)
1065 		xuo->flags = XFRM_OFFLOAD_INBOUND;
1066 	if (xso->type == XFRM_DEV_OFFLOAD_PACKET)
1067 		xuo->flags |= XFRM_OFFLOAD_PACKET;
1068 
1069 	return 0;
1070 }
1071 
1072 static bool xfrm_redact(void)
1073 {
1074 	return IS_ENABLED(CONFIG_SECURITY) &&
1075 		security_locked_down(LOCKDOWN_XFRM_SECRET);
1076 }
1077 
1078 static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
1079 {
1080 	struct xfrm_algo *algo;
1081 	struct xfrm_algo_auth *ap;
1082 	struct nlattr *nla;
1083 	bool redact_secret = xfrm_redact();
1084 
1085 	nla = nla_reserve(skb, XFRMA_ALG_AUTH,
1086 			  sizeof(*algo) + (auth->alg_key_len + 7) / 8);
1087 	if (!nla)
1088 		return -EMSGSIZE;
1089 	algo = nla_data(nla);
1090 	strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
1091 
1092 	if (redact_secret && auth->alg_key_len)
1093 		memset(algo->alg_key, 0, (auth->alg_key_len + 7) / 8);
1094 	else
1095 		memcpy(algo->alg_key, auth->alg_key,
1096 		       (auth->alg_key_len + 7) / 8);
1097 	algo->alg_key_len = auth->alg_key_len;
1098 
1099 	nla = nla_reserve(skb, XFRMA_ALG_AUTH_TRUNC, xfrm_alg_auth_len(auth));
1100 	if (!nla)
1101 		return -EMSGSIZE;
1102 	ap = nla_data(nla);
1103 	strscpy_pad(ap->alg_name, auth->alg_name, sizeof(ap->alg_name));
1104 	ap->alg_key_len = auth->alg_key_len;
1105 	ap->alg_trunc_len = auth->alg_trunc_len;
1106 	if (redact_secret && auth->alg_key_len)
1107 		memset(ap->alg_key, 0, (auth->alg_key_len + 7) / 8);
1108 	else
1109 		memcpy(ap->alg_key, auth->alg_key,
1110 		       (auth->alg_key_len + 7) / 8);
1111 	return 0;
1112 }
1113 
1114 static int copy_to_user_aead(struct xfrm_algo_aead *aead, struct sk_buff *skb)
1115 {
1116 	struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_AEAD, aead_len(aead));
1117 	struct xfrm_algo_aead *ap;
1118 	bool redact_secret = xfrm_redact();
1119 
1120 	if (!nla)
1121 		return -EMSGSIZE;
1122 
1123 	ap = nla_data(nla);
1124 	strscpy_pad(ap->alg_name, aead->alg_name, sizeof(ap->alg_name));
1125 	ap->alg_key_len = aead->alg_key_len;
1126 	ap->alg_icv_len = aead->alg_icv_len;
1127 
1128 	if (redact_secret && aead->alg_key_len)
1129 		memset(ap->alg_key, 0, (aead->alg_key_len + 7) / 8);
1130 	else
1131 		memcpy(ap->alg_key, aead->alg_key,
1132 		       (aead->alg_key_len + 7) / 8);
1133 	return 0;
1134 }
1135 
1136 static int copy_to_user_ealg(struct xfrm_algo *ealg, struct sk_buff *skb)
1137 {
1138 	struct xfrm_algo *ap;
1139 	bool redact_secret = xfrm_redact();
1140 	struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_CRYPT,
1141 					 xfrm_alg_len(ealg));
1142 	if (!nla)
1143 		return -EMSGSIZE;
1144 
1145 	ap = nla_data(nla);
1146 	strscpy_pad(ap->alg_name, ealg->alg_name, sizeof(ap->alg_name));
1147 	ap->alg_key_len = ealg->alg_key_len;
1148 
1149 	if (redact_secret && ealg->alg_key_len)
1150 		memset(ap->alg_key, 0, (ealg->alg_key_len + 7) / 8);
1151 	else
1152 		memcpy(ap->alg_key, ealg->alg_key,
1153 		       (ealg->alg_key_len + 7) / 8);
1154 
1155 	return 0;
1156 }
1157 
1158 static int copy_to_user_calg(struct xfrm_algo *calg, struct sk_buff *skb)
1159 {
1160 	struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_COMP, sizeof(*calg));
1161 	struct xfrm_algo *ap;
1162 
1163 	if (!nla)
1164 		return -EMSGSIZE;
1165 
1166 	ap = nla_data(nla);
1167 	strscpy_pad(ap->alg_name, calg->alg_name, sizeof(ap->alg_name));
1168 	ap->alg_key_len = 0;
1169 
1170 	return 0;
1171 }
1172 
1173 static int copy_to_user_encap(struct xfrm_encap_tmpl *ep, struct sk_buff *skb)
1174 {
1175 	struct nlattr *nla = nla_reserve(skb, XFRMA_ENCAP, sizeof(*ep));
1176 	struct xfrm_encap_tmpl *uep;
1177 
1178 	if (!nla)
1179 		return -EMSGSIZE;
1180 
1181 	uep = nla_data(nla);
1182 	memset(uep, 0, sizeof(*uep));
1183 
1184 	uep->encap_type = ep->encap_type;
1185 	uep->encap_sport = ep->encap_sport;
1186 	uep->encap_dport = ep->encap_dport;
1187 	uep->encap_oa = ep->encap_oa;
1188 
1189 	return 0;
1190 }
1191 
1192 static int xfrm_smark_put(struct sk_buff *skb, struct xfrm_mark *m)
1193 {
1194 	int ret = 0;
1195 
1196 	if (m->v | m->m) {
1197 		ret = nla_put_u32(skb, XFRMA_SET_MARK, m->v);
1198 		if (!ret)
1199 			ret = nla_put_u32(skb, XFRMA_SET_MARK_MASK, m->m);
1200 	}
1201 	return ret;
1202 }
1203 
1204 /* Don't change this without updating xfrm_sa_len! */
1205 static int copy_to_user_state_extra(struct xfrm_state *x,
1206 				    struct xfrm_usersa_info *p,
1207 				    struct sk_buff *skb)
1208 {
1209 	int ret = 0;
1210 
1211 	copy_to_user_state(x, p);
1212 
1213 	if (x->props.extra_flags) {
1214 		ret = nla_put_u32(skb, XFRMA_SA_EXTRA_FLAGS,
1215 				  x->props.extra_flags);
1216 		if (ret)
1217 			goto out;
1218 	}
1219 
1220 	if (x->coaddr) {
1221 		ret = nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
1222 		if (ret)
1223 			goto out;
1224 	}
1225 	if (x->lastused) {
1226 		ret = nla_put_u64_64bit(skb, XFRMA_LASTUSED, x->lastused,
1227 					XFRMA_PAD);
1228 		if (ret)
1229 			goto out;
1230 	}
1231 	if (x->aead) {
1232 		ret = copy_to_user_aead(x->aead, skb);
1233 		if (ret)
1234 			goto out;
1235 	}
1236 	if (x->aalg) {
1237 		ret = copy_to_user_auth(x->aalg, skb);
1238 		if (ret)
1239 			goto out;
1240 	}
1241 	if (x->ealg) {
1242 		ret = copy_to_user_ealg(x->ealg, skb);
1243 		if (ret)
1244 			goto out;
1245 	}
1246 	if (x->calg) {
1247 		ret = copy_to_user_calg(x->calg, skb);
1248 		if (ret)
1249 			goto out;
1250 	}
1251 	if (x->encap) {
1252 		ret = copy_to_user_encap(x->encap, skb);
1253 		if (ret)
1254 			goto out;
1255 	}
1256 	if (x->tfcpad) {
1257 		ret = nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad);
1258 		if (ret)
1259 			goto out;
1260 	}
1261 	ret = xfrm_mark_put(skb, &x->mark);
1262 	if (ret)
1263 		goto out;
1264 
1265 	ret = xfrm_smark_put(skb, &x->props.smark);
1266 	if (ret)
1267 		goto out;
1268 
1269 	if (x->replay_esn)
1270 		ret = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
1271 			      xfrm_replay_state_esn_len(x->replay_esn),
1272 			      x->replay_esn);
1273 	else
1274 		ret = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
1275 			      &x->replay);
1276 	if (ret)
1277 		goto out;
1278 	if(x->xso.dev)
1279 		ret = copy_user_offload(&x->xso, skb);
1280 	if (ret)
1281 		goto out;
1282 	if (x->if_id) {
1283 		ret = nla_put_u32(skb, XFRMA_IF_ID, x->if_id);
1284 		if (ret)
1285 			goto out;
1286 	}
1287 	if (x->security) {
1288 		ret = copy_sec_ctx(x->security, skb);
1289 		if (ret)
1290 			goto out;
1291 	}
1292 	if (x->mapping_maxage) {
1293 		ret = nla_put_u32(skb, XFRMA_MTIMER_THRESH, x->mapping_maxage);
1294 		if (ret)
1295 			goto out;
1296 	}
1297 	if (x->dir)
1298 		ret = nla_put_u8(skb, XFRMA_SA_DIR, x->dir);
1299 
1300 	if (x->nat_keepalive_interval) {
1301 		ret = nla_put_u32(skb, XFRMA_NAT_KEEPALIVE_INTERVAL,
1302 				  x->nat_keepalive_interval);
1303 		if (ret)
1304 			goto out;
1305 	}
1306 out:
1307 	return ret;
1308 }
1309 
/* Emit one SA as an XFRM_MSG_NEWSA message into the dump skb.
 * Called from xfrm_state_walk(); @ptr is a struct xfrm_dump_info.
 */
static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
{
	struct xfrm_dump_info *sp = ptr;
	struct sk_buff *in_skb = sp->in_skb;
	struct sk_buff *skb = sp->out_skb;
	struct xfrm_translator *xtr;
	struct xfrm_usersa_info *p;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
			XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	p = nlmsg_data(nlh);

	err = copy_to_user_state_extra(x, p, skb);
	if (err) {
		/* Roll back the partially built message. */
		nlmsg_cancel(skb, nlh);
		return err;
	}
	nlmsg_end(skb, nlh);

	/* Let a registered compat translator append its translation too. */
	xtr = xfrm_get_translator();
	if (xtr) {
		err = xtr->alloc_compat(skb, nlh);

		xfrm_put_translator(xtr);
		if (err) {
			nlmsg_cancel(skb, nlh);
			return err;
		}
	}

	return 0;
}
1347 
1348 static int xfrm_dump_sa_done(struct netlink_callback *cb)
1349 {
1350 	struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
1351 	struct sock *sk = cb->skb->sk;
1352 	struct net *net = sock_net(sk);
1353 
1354 	if (cb->args[0])
1355 		xfrm_state_walk_done(walk, net);
1356 	return 0;
1357 }
1358 
/* Netlink dump callback for GETSA: walk the SAD and emit one
 * XFRM_MSG_NEWSA message per state via dump_one_state().
 */
static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
	struct xfrm_dump_info info;

	/* The walk state lives in cb->args[1..]; cb->args[0] records
	 * whether the walk has been initialized yet.
	 */
	BUILD_BUG_ON(sizeof(struct xfrm_state_walk) >
		     sizeof(cb->args) - sizeof(cb->args[0]));

	info.in_skb = cb->skb;
	info.out_skb = skb;
	info.nlmsg_seq = cb->nlh->nlmsg_seq;
	info.nlmsg_flags = NLM_F_MULTI;

	if (!cb->args[0]) {
		/* First invocation: parse optional filter attributes and
		 * start the walk.
		 */
		struct nlattr *attrs[XFRMA_MAX+1];
		struct xfrm_address_filter *filter = NULL;
		u8 proto = 0;
		int err;

		err = nlmsg_parse_deprecated(cb->nlh, 0, attrs, XFRMA_MAX,
					     xfrma_policy, cb->extack);
		if (err < 0)
			return err;

		if (attrs[XFRMA_ADDRESS_FILTER]) {
			filter = kmemdup(nla_data(attrs[XFRMA_ADDRESS_FILTER]),
					 sizeof(*filter), GFP_KERNEL);
			if (filter == NULL)
				return -ENOMEM;

			/* see addr_match(), (prefix length >> 5) << 2
			 * will be used to compare xfrm_address_t
			 */
			if (filter->splen > (sizeof(xfrm_address_t) << 3) ||
			    filter->dplen > (sizeof(xfrm_address_t) << 3)) {
				kfree(filter);
				return -EINVAL;
			}
		}

		if (attrs[XFRMA_PROTO])
			proto = nla_get_u8(attrs[XFRMA_PROTO]);

		/* The walk takes over @filter; presumably it is freed by
		 * xfrm_state_walk_done() — not freed here on success.
		 */
		xfrm_state_walk_init(walk, proto, filter);
		cb->args[0] = 1;
	}

	(void) xfrm_state_walk(net, walk, dump_one_state, &info);

	return skb->len;
}
1411 
1412 static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
1413 					  struct xfrm_state *x, u32 seq)
1414 {
1415 	struct xfrm_dump_info info;
1416 	struct sk_buff *skb;
1417 	int err;
1418 
1419 	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1420 	if (!skb)
1421 		return ERR_PTR(-ENOMEM);
1422 
1423 	info.in_skb = in_skb;
1424 	info.out_skb = skb;
1425 	info.nlmsg_seq = seq;
1426 	info.nlmsg_flags = 0;
1427 
1428 	err = dump_one_state(x, 0, &info);
1429 	if (err) {
1430 		kfree_skb(skb);
1431 		return ERR_PTR(err);
1432 	}
1433 
1434 	return skb;
1435 }
1436 
1437 /* A wrapper for nlmsg_multicast() checking that nlsk is still available.
1438  * Must be called with RCU read lock.
1439  */
1440 static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb,
1441 				       u32 pid, unsigned int group)
1442 {
1443 	struct sock *nlsk = rcu_dereference(net->xfrm.nlsk);
1444 	struct xfrm_translator *xtr;
1445 
1446 	if (!nlsk) {
1447 		kfree_skb(skb);
1448 		return -EPIPE;
1449 	}
1450 
1451 	xtr = xfrm_get_translator();
1452 	if (xtr) {
1453 		int err = xtr->alloc_compat(skb, nlmsg_hdr(skb));
1454 
1455 		xfrm_put_translator(xtr);
1456 		if (err) {
1457 			kfree_skb(skb);
1458 			return err;
1459 		}
1460 	}
1461 
1462 	return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
1463 }
1464 
static inline unsigned int xfrm_spdinfo_msgsize(void)
{
	/* 4-byte flags payload plus the SPD counters, hash info, and the
	 * hash-threshold attribute counted twice (IPv4 and IPv6).
	 */
	return NLMSG_ALIGN(4)
	       + nla_total_size(sizeof(struct xfrmu_spdinfo))
	       + nla_total_size(sizeof(struct xfrmu_spdhinfo))
	       + nla_total_size(sizeof(struct xfrmu_spdhthresh))
	       + nla_total_size(sizeof(struct xfrmu_spdhthresh));
}
1473 
/* Fill @skb with an XFRM_MSG_NEWSPDINFO reply: the echoed flags word
 * followed by SPD counters, hash info and IPv4/IPv6 hash thresholds.
 */
static int build_spdinfo(struct sk_buff *skb, struct net *net,
			 u32 portid, u32 seq, u32 flags)
{
	struct xfrmk_spdinfo si;
	struct xfrmu_spdinfo spc;
	struct xfrmu_spdhinfo sph;
	struct xfrmu_spdhthresh spt4, spt6;
	struct nlmsghdr *nlh;
	int err;
	u32 *f;
	unsigned lseq;

	nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
	if (nlh == NULL) /* shouldn't really happen ... */
		return -EMSGSIZE;

	f = nlmsg_data(nlh);
	*f = flags;
	xfrm_spd_getinfo(net, &si);
	spc.incnt = si.incnt;
	spc.outcnt = si.outcnt;
	spc.fwdcnt = si.fwdcnt;
	spc.inscnt = si.inscnt;
	spc.outscnt = si.outscnt;
	spc.fwdscnt = si.fwdscnt;
	sph.spdhcnt = si.spdhcnt;
	sph.spdhmcnt = si.spdhmcnt;

	/* Read the thresholds under the seqlock so each lbits/rbits pair
	 * is consistent with a concurrent xfrm_set_spdinfo() update.
	 */
	do {
		lseq = read_seqbegin(&net->xfrm.policy_hthresh.lock);

		spt4.lbits = net->xfrm.policy_hthresh.lbits4;
		spt4.rbits = net->xfrm.policy_hthresh.rbits4;
		spt6.lbits = net->xfrm.policy_hthresh.lbits6;
		spt6.rbits = net->xfrm.policy_hthresh.rbits6;
	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, lseq));

	err = nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
	if (!err)
		err = nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
	if (!err)
		err = nla_put(skb, XFRMA_SPD_IPV4_HTHRESH, sizeof(spt4), &spt4);
	if (!err)
		err = nla_put(skb, XFRMA_SPD_IPV6_HTHRESH, sizeof(spt6), &spt6);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}

	nlmsg_end(skb, nlh);
	return 0;
}
1526 
/* XFRM_MSG_NEWSPDINFO handler: validate and apply new SPD hash
 * thresholds (selector prefix lengths), then rebuild the policy hash.
 */
static int xfrm_set_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs,
			    struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrmu_spdhthresh *thresh4 = NULL;
	struct xfrmu_spdhthresh *thresh6 = NULL;

	/* selector prefixlen thresholds to hash policies */
	if (attrs[XFRMA_SPD_IPV4_HTHRESH]) {
		struct nlattr *rta = attrs[XFRMA_SPD_IPV4_HTHRESH];

		if (nla_len(rta) < sizeof(*thresh4)) {
			NL_SET_ERR_MSG(extack, "Invalid SPD_IPV4_HTHRESH attribute length");
			return -EINVAL;
		}
		thresh4 = nla_data(rta);
		if (thresh4->lbits > 32 || thresh4->rbits > 32) {
			NL_SET_ERR_MSG(extack, "Invalid hash threshold (must be <= 32 for IPv4)");
			return -EINVAL;
		}
	}
	if (attrs[XFRMA_SPD_IPV6_HTHRESH]) {
		struct nlattr *rta = attrs[XFRMA_SPD_IPV6_HTHRESH];

		if (nla_len(rta) < sizeof(*thresh6)) {
			NL_SET_ERR_MSG(extack, "Invalid SPD_IPV6_HTHRESH attribute length");
			return -EINVAL;
		}
		thresh6 = nla_data(rta);
		if (thresh6->lbits > 128 || thresh6->rbits > 128) {
			NL_SET_ERR_MSG(extack, "Invalid hash threshold (must be <= 128 for IPv6)");
			return -EINVAL;
		}
	}

	if (thresh4 || thresh6) {
		/* Publish the update under the write seqlock so readers
		 * (build_spdinfo()) see a consistent pair of values.
		 */
		write_seqlock(&net->xfrm.policy_hthresh.lock);
		if (thresh4) {
			net->xfrm.policy_hthresh.lbits4 = thresh4->lbits;
			net->xfrm.policy_hthresh.rbits4 = thresh4->rbits;
		}
		if (thresh6) {
			net->xfrm.policy_hthresh.lbits6 = thresh6->lbits;
			net->xfrm.policy_hthresh.rbits6 = thresh6->rbits;
		}
		write_sequnlock(&net->xfrm.policy_hthresh.lock);

		xfrm_policy_hash_rebuild(net);
	}

	return 0;
}
1580 
1581 static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
1582 			    struct nlattr **attrs,
1583 			    struct netlink_ext_ack *extack)
1584 {
1585 	struct net *net = sock_net(skb->sk);
1586 	struct sk_buff *r_skb;
1587 	u32 *flags = nlmsg_data(nlh);
1588 	u32 sportid = NETLINK_CB(skb).portid;
1589 	u32 seq = nlh->nlmsg_seq;
1590 	int err;
1591 
1592 	r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
1593 	if (r_skb == NULL)
1594 		return -ENOMEM;
1595 
1596 	err = build_spdinfo(r_skb, net, sportid, seq, *flags);
1597 	BUG_ON(err < 0);
1598 
1599 	return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
1600 }
1601 
static inline unsigned int xfrm_sadinfo_msgsize(void)
{
	/* 4-byte flags payload + SAD hash info + SA count attribute. */
	return NLMSG_ALIGN(4)
	       + nla_total_size(sizeof(struct xfrmu_sadhinfo))
	       + nla_total_size(4); /* XFRMA_SAD_CNT */
}
1608 
1609 static int build_sadinfo(struct sk_buff *skb, struct net *net,
1610 			 u32 portid, u32 seq, u32 flags)
1611 {
1612 	struct xfrmk_sadinfo si;
1613 	struct xfrmu_sadhinfo sh;
1614 	struct nlmsghdr *nlh;
1615 	int err;
1616 	u32 *f;
1617 
1618 	nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
1619 	if (nlh == NULL) /* shouldn't really happen ... */
1620 		return -EMSGSIZE;
1621 
1622 	f = nlmsg_data(nlh);
1623 	*f = flags;
1624 	xfrm_sad_getinfo(net, &si);
1625 
1626 	sh.sadhmcnt = si.sadhmcnt;
1627 	sh.sadhcnt = si.sadhcnt;
1628 
1629 	err = nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt);
1630 	if (!err)
1631 		err = nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
1632 	if (err) {
1633 		nlmsg_cancel(skb, nlh);
1634 		return err;
1635 	}
1636 
1637 	nlmsg_end(skb, nlh);
1638 	return 0;
1639 }
1640 
1641 static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
1642 			    struct nlattr **attrs,
1643 			    struct netlink_ext_ack *extack)
1644 {
1645 	struct net *net = sock_net(skb->sk);
1646 	struct sk_buff *r_skb;
1647 	u32 *flags = nlmsg_data(nlh);
1648 	u32 sportid = NETLINK_CB(skb).portid;
1649 	u32 seq = nlh->nlmsg_seq;
1650 	int err;
1651 
1652 	r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
1653 	if (r_skb == NULL)
1654 		return -ENOMEM;
1655 
1656 	err = build_sadinfo(r_skb, net, sportid, seq, *flags);
1657 	BUG_ON(err < 0);
1658 
1659 	return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
1660 }
1661 
1662 static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
1663 		       struct nlattr **attrs, struct netlink_ext_ack *extack)
1664 {
1665 	struct net *net = sock_net(skb->sk);
1666 	struct xfrm_usersa_id *p = nlmsg_data(nlh);
1667 	struct xfrm_state *x;
1668 	struct sk_buff *resp_skb;
1669 	int err = -ESRCH;
1670 
1671 	x = xfrm_user_state_lookup(net, p, attrs, &err);
1672 	if (x == NULL)
1673 		goto out_noput;
1674 
1675 	resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
1676 	if (IS_ERR(resp_skb)) {
1677 		err = PTR_ERR(resp_skb);
1678 	} else {
1679 		err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
1680 	}
1681 	xfrm_state_put(x);
1682 out_noput:
1683 	return err;
1684 }
1685 
/* XFRM_MSG_ALLOCSPI handler: find (or create) an ACQUIRE state matching
 * the request, allocate an SPI in [p->min, p->max] for it, and unicast
 * the resulting SA back to the requester.
 */
static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct nlattr **attrs,
			      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct xfrm_userspi_info *p;
	struct xfrm_translator *xtr;
	struct sk_buff *resp_skb;
	xfrm_address_t *daddr;
	int family;
	int err;
	u32 mark;
	struct xfrm_mark m;
	u32 if_id = 0;

	p = nlmsg_data(nlh);
	err = verify_spi_info(p->info.id.proto, p->min, p->max, extack);
	if (err)
		goto out_noput;

	family = p->info.family;
	daddr = &p->info.id.daddr;

	x = NULL;

	mark = xfrm_mark_get(attrs, &m);

	if (attrs[XFRMA_IF_ID])
		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	if (p->info.seq) {
		/* Try to locate the original ACQUIRE by sequence number;
		 * drop it again if the destination address doesn't match.
		 */
		x = xfrm_find_acq_byseq(net, mark, p->info.seq);
		if (x && !xfrm_addr_equal(&x->id.daddr, daddr, family)) {
			xfrm_state_put(x);
			x = NULL;
		}
	}

	if (!x)
		x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
				  if_id, p->info.id.proto, daddr,
				  &p->info.saddr, 1,
				  family);
	err = -ENOENT;
	if (!x) {
		NL_SET_ERR_MSG(extack, "Target ACQUIRE not found");
		goto out_noput;
	}

	err = xfrm_alloc_spi(x, p->min, p->max, extack);
	if (err)
		goto out;

	if (attrs[XFRMA_SA_DIR])
		x->dir = nla_get_u8(attrs[XFRMA_SA_DIR]);

	resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
	if (IS_ERR(resp_skb)) {
		err = PTR_ERR(resp_skb);
		goto out;
	}

	xtr = xfrm_get_translator();
	if (xtr) {
		err = xtr->alloc_compat(skb, nlmsg_hdr(skb));

		xfrm_put_translator(xtr);
		if (err) {
			kfree_skb(resp_skb);
			goto out;
		}
	}

	err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);

out:
	/* Drop the reference taken by the acquire lookup above. */
	xfrm_state_put(x);
out_noput:
	return err;
}
1767 
1768 static int verify_policy_dir(u8 dir, struct netlink_ext_ack *extack)
1769 {
1770 	switch (dir) {
1771 	case XFRM_POLICY_IN:
1772 	case XFRM_POLICY_OUT:
1773 	case XFRM_POLICY_FWD:
1774 		break;
1775 
1776 	default:
1777 		NL_SET_ERR_MSG(extack, "Invalid policy direction");
1778 		return -EINVAL;
1779 	}
1780 
1781 	return 0;
1782 }
1783 
/* Validate a userspace policy type; SUB policies are only accepted
 * when CONFIG_XFRM_SUB_POLICY is enabled.
 */
static int verify_policy_type(u8 type, struct netlink_ext_ack *extack)
{
	switch (type) {
	case XFRM_POLICY_TYPE_MAIN:
#ifdef CONFIG_XFRM_SUB_POLICY
	case XFRM_POLICY_TYPE_SUB:
#endif
		break;

	default:
		NL_SET_ERR_MSG(extack, "Invalid policy type");
		return -EINVAL;
	}

	return 0;
}
1800 
/* Sanity-check a new policy from userspace: share mode, action,
 * selector family and prefix lengths, direction, and the consistency
 * of a user-supplied index with the direction.
 */
static int verify_newpolicy_info(struct xfrm_userpolicy_info *p,
				 struct netlink_ext_ack *extack)
{
	int ret;

	switch (p->share) {
	case XFRM_SHARE_ANY:
	case XFRM_SHARE_SESSION:
	case XFRM_SHARE_USER:
	case XFRM_SHARE_UNIQUE:
		break;

	default:
		NL_SET_ERR_MSG(extack, "Invalid policy share");
		return -EINVAL;
	}

	switch (p->action) {
	case XFRM_POLICY_ALLOW:
	case XFRM_POLICY_BLOCK:
		break;

	default:
		NL_SET_ERR_MSG(extack, "Invalid policy action");
		return -EINVAL;
	}

	switch (p->sel.family) {
	case AF_INET:
		if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32) {
			NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 32 for IPv4)");
			return -EINVAL;
		}

		break;

	case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
		if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128) {
			NL_SET_ERR_MSG(extack, "Invalid prefix length in selector (must be <= 128 for IPv6)");
			return -EINVAL;
		}

		break;
#else
		/* IPv6 selectors are rejected outright without IPv6 support. */
		NL_SET_ERR_MSG(extack, "IPv6 support disabled");
		return  -EAFNOSUPPORT;
#endif

	default:
		NL_SET_ERR_MSG(extack, "Invalid selector family");
		return -EINVAL;
	}

	ret = verify_policy_dir(p->dir, extack);
	if (ret)
		return ret;
	/* A policy index encodes its direction; reject mismatches. */
	if (p->index && (xfrm_policy_id2dir(p->index) != p->dir)) {
		NL_SET_ERR_MSG(extack, "Policy index doesn't match direction");
		return -EINVAL;
	}

	return 0;
}
1865 
1866 static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs)
1867 {
1868 	struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1869 	struct xfrm_user_sec_ctx *uctx;
1870 
1871 	if (!rt)
1872 		return 0;
1873 
1874 	uctx = nla_data(rt);
1875 	return security_xfrm_policy_alloc(&pol->security, uctx, GFP_KERNEL);
1876 }
1877 
1878 static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
1879 			   int nr)
1880 {
1881 	int i;
1882 
1883 	xp->xfrm_nr = nr;
1884 	for (i = 0; i < nr; i++, ut++) {
1885 		struct xfrm_tmpl *t = &xp->xfrm_vec[i];
1886 
1887 		memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
1888 		memcpy(&t->saddr, &ut->saddr,
1889 		       sizeof(xfrm_address_t));
1890 		t->reqid = ut->reqid;
1891 		t->mode = ut->mode;
1892 		t->share = ut->share;
1893 		t->optional = ut->optional;
1894 		t->aalgos = ut->aalgos;
1895 		t->ealgos = ut->ealgos;
1896 		t->calgos = ut->calgos;
1897 		/* If all masks are ~0, then we allow all algorithms. */
1898 		t->allalgs = !~(t->aalgos & t->ealgos & t->calgos);
1899 		t->encap_family = ut->family;
1900 	}
1901 }
1902 
/* Validate a userspace template array: depth, per-template mode,
 * family and protocol, and family transitions between templates.
 */
static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family,
			 int dir, struct netlink_ext_ack *extack)
{
	u16 prev_family;
	int i;

	if (nr > XFRM_MAX_DEPTH) {
		NL_SET_ERR_MSG(extack, "Template count must be <= XFRM_MAX_DEPTH (" __stringify(XFRM_MAX_DEPTH) ")");
		return -EINVAL;
	}

	prev_family = family;

	for (i = 0; i < nr; i++) {
		/* We never validated the ut->family value, so many
		 * applications simply leave it at zero.  The check was
		 * never made and ut->family was ignored because all
		 * templates could be assumed to have the same family as
		 * the policy itself.  Now that we will have ipv4-in-ipv6
		 * and ipv6-in-ipv4 tunnels, this is no longer true.
		 */
		if (!ut[i].family)
			ut[i].family = family;

		switch (ut[i].mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			/* Tunnel/BEET templates may change family, but an
			 * optional one is not allowed in outbound policies.
			 */
			if (ut[i].optional && dir == XFRM_POLICY_OUT) {
				NL_SET_ERR_MSG(extack, "Mode in optional template not allowed in outbound policy");
				return -EINVAL;
			}
			break;
		default:
			/* Non-tunnel modes must keep the previous family. */
			if (ut[i].family != prev_family) {
				NL_SET_ERR_MSG(extack, "Mode in template doesn't support a family change");
				return -EINVAL;
			}
			break;
		}
		if (ut[i].mode >= XFRM_MODE_MAX) {
			NL_SET_ERR_MSG(extack, "Mode in template must be < XFRM_MODE_MAX (" __stringify(XFRM_MODE_MAX) ")");
			return -EINVAL;
		}

		prev_family = ut[i].family;

		switch (ut[i].family) {
		case AF_INET:
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case AF_INET6:
			break;
#endif
		default:
			NL_SET_ERR_MSG(extack, "Invalid family in template");
			return -EINVAL;
		}

		if (!xfrm_id_proto_valid(ut[i].id.proto)) {
			NL_SET_ERR_MSG(extack, "Invalid XFRM protocol in template");
			return -EINVAL;
		}
	}

	return 0;
}
1969 
1970 static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs,
1971 			       int dir, struct netlink_ext_ack *extack)
1972 {
1973 	struct nlattr *rt = attrs[XFRMA_TMPL];
1974 
1975 	if (!rt) {
1976 		pol->xfrm_nr = 0;
1977 	} else {
1978 		struct xfrm_user_tmpl *utmpl = nla_data(rt);
1979 		int nr = nla_len(rt) / sizeof(*utmpl);
1980 		int err;
1981 
1982 		err = validate_tmpl(nr, utmpl, pol->family, dir, extack);
1983 		if (err)
1984 			return err;
1985 
1986 		copy_templates(pol, utmpl, nr);
1987 	}
1988 	return 0;
1989 }
1990 
1991 static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs,
1992 				      struct netlink_ext_ack *extack)
1993 {
1994 	struct nlattr *rt = attrs[XFRMA_POLICY_TYPE];
1995 	struct xfrm_userpolicy_type *upt;
1996 	u8 type = XFRM_POLICY_TYPE_MAIN;
1997 	int err;
1998 
1999 	if (rt) {
2000 		upt = nla_data(rt);
2001 		type = upt->type;
2002 	}
2003 
2004 	err = verify_policy_type(type, extack);
2005 	if (err)
2006 		return err;
2007 
2008 	*tp = type;
2009 	return 0;
2010 }
2011 
2012 static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
2013 {
2014 	xp->priority = p->priority;
2015 	xp->index = p->index;
2016 	memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
2017 	memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
2018 	xp->action = p->action;
2019 	xp->flags = p->flags;
2020 	xp->family = p->sel.family;
2021 	/* XXX xp->share = p->share; */
2022 }
2023 
2024 static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
2025 {
2026 	memset(p, 0, sizeof(*p));
2027 	memcpy(&p->sel, &xp->selector, sizeof(p->sel));
2028 	memcpy(&p->lft, &xp->lft, sizeof(p->lft));
2029 	memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
2030 	p->priority = xp->priority;
2031 	p->index = xp->index;
2032 	p->sel.family = xp->family;
2033 	p->dir = dir;
2034 	p->action = xp->action;
2035 	p->flags = xp->flags;
2036 	p->share = XFRM_SHARE_ANY; /* XXX xp->share */
2037 }
2038 
/* Allocate and populate a policy from an XFRM_MSG_NEWPOLICY/UPDPOLICY
 * request.  Returns the policy on success; on failure returns NULL and
 * stores the negative errno in *errp.
 */
static struct xfrm_policy *xfrm_policy_construct(struct net *net,
						 struct xfrm_userpolicy_info *p,
						 struct nlattr **attrs,
						 int *errp,
						 struct netlink_ext_ack *extack)
{
	struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL);
	int err;

	if (!xp) {
		*errp = -ENOMEM;
		return NULL;
	}

	copy_from_user_policy(xp, p);

	err = copy_from_user_policy_type(&xp->type, attrs, extack);
	if (err)
		goto error;

	if (!(err = copy_from_user_tmpl(xp, attrs, p->dir, extack)))
		err = copy_from_user_sec_ctx(xp, attrs);
	if (err)
		goto error;

	xfrm_mark_get(attrs, &xp->mark);

	if (attrs[XFRMA_IF_ID])
		xp->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	/* configure the hardware if offload is requested */
	if (attrs[XFRMA_OFFLOAD_DEV]) {
		err = xfrm_dev_policy_add(net, xp,
					  nla_data(attrs[XFRMA_OFFLOAD_DEV]),
					  p->dir, extack);
		if (err)
			goto error;
	}

	return xp;
 error:
	*errp = err;
	/* Mark the policy dead before destroying the half-built object. */
	xp->walk.dead = 1;
	xfrm_policy_destroy(xp);
	return NULL;
}
2085 
/* XFRM_MSG_NEWPOLICY / XFRM_MSG_UPDPOLICY handler: validate, build and
 * insert a policy, then notify listeners.
 */
static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
	struct xfrm_policy *xp;
	struct km_event c;
	int err;
	int excl;

	err = verify_newpolicy_info(p, extack);
	if (err)
		return err;
	err = verify_sec_ctx_len(attrs, extack);
	if (err)
		return err;

	xp = xfrm_policy_construct(net, p, attrs, &err, extack);
	if (!xp)
		return err;

	/* shouldn't excl be based on nlh flags??
	 * Aha! this is anti-netlink really i.e  more pfkey derived
	 * in netlink excl is a flag and you wouldn't need
	 * a type XFRM_MSG_UPDPOLICY - JHS */
	excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
	err = xfrm_policy_insert(p->dir, xp, excl);
	xfrm_audit_policy_add(xp, err ? 0 : 1, true);

	if (err) {
		/* Insert failed: tear down the never-published policy by
		 * hand (device offload, LSM context, then the object).
		 */
		xfrm_dev_policy_delete(xp);
		xfrm_dev_policy_free(xp);
		security_xfrm_policy_free(xp->security);
		kfree(xp);
		return err;
	}

	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	km_policy_notify(xp, p->dir, &c);

	xfrm_pol_put(xp);

	return 0;
}
2133 
/* Emit the policy's template vector as a single XFRMA_TMPL attribute.
 * Templates are staged in a stack array (bounded by XFRM_MAX_DEPTH)
 * before being copied into the skb.
 */
static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
{
	struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
	int i;

	if (xp->xfrm_nr == 0)
		return 0;

	if (xp->xfrm_nr > XFRM_MAX_DEPTH)
		return -ENOBUFS;

	for (i = 0; i < xp->xfrm_nr; i++) {
		struct xfrm_user_tmpl *up = &vec[i];
		struct xfrm_tmpl *kp = &xp->xfrm_vec[i];

		/* Zero first so struct padding is not leaked to userspace. */
		memset(up, 0, sizeof(*up));
		memcpy(&up->id, &kp->id, sizeof(up->id));
		up->family = kp->encap_family;
		memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
		up->reqid = kp->reqid;
		up->mode = kp->mode;
		up->share = kp->share;
		up->optional = kp->optional;
		up->aalgos = kp->aalgos;
		up->ealgos = kp->ealgos;
		up->calgos = kp->calgos;
	}

	return nla_put(skb, XFRMA_TMPL,
		       sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec);
}
2165 
2166 static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
2167 {
2168 	if (x->security) {
2169 		return copy_sec_ctx(x->security, skb);
2170 	}
2171 	return 0;
2172 }
2173 
2174 static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
2175 {
2176 	if (xp->security)
2177 		return copy_sec_ctx(xp->security, skb);
2178 	return 0;
2179 }
static inline unsigned int userpolicy_type_attrsize(void)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	/* Space for the XFRMA_POLICY_TYPE attribute. */
	return nla_total_size(sizeof(struct xfrm_userpolicy_type));
#else
	/* The attribute is only emitted with sub-policy support. */
	return 0;
#endif
}
2188 
#ifdef CONFIG_XFRM_SUB_POLICY
/* Emit the policy type as an XFRMA_POLICY_TYPE attribute. */
static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
	struct xfrm_userpolicy_type upt;

	/* Sadly there are two holes in struct xfrm_userpolicy_type */
	memset(&upt, 0, sizeof(upt));
	upt.type = type;

	return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
}

#else
/* Without sub-policy support the attribute is never emitted. */
static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
	return 0;
}
#endif
2207 
/* Emit one policy as an XFRM_MSG_NEWPOLICY message into the dump skb.
 * Called from xfrm_policy_walk(); @ptr is a struct xfrm_dump_info.
 */
static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
{
	struct xfrm_dump_info *sp = ptr;
	struct xfrm_userpolicy_info *p;
	struct sk_buff *in_skb = sp->in_skb;
	struct sk_buff *skb = sp->out_skb;
	struct xfrm_translator *xtr;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
			XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	p = nlmsg_data(nlh);
	copy_to_user_policy(xp, p, dir);
	err = copy_to_user_tmpl(xp, skb);
	if (!err)
		err = copy_to_user_sec_ctx(xp, skb);
	if (!err)
		err = copy_to_user_policy_type(xp->type, skb);
	if (!err)
		err = xfrm_mark_put(skb, &xp->mark);
	if (!err)
		err = xfrm_if_id_put(skb, xp->if_id);
	if (!err && xp->xdo.dev)
		err = copy_user_offload(&xp->xdo, skb);
	if (err) {
		/* Roll back the partially built message. */
		nlmsg_cancel(skb, nlh);
		return err;
	}
	nlmsg_end(skb, nlh);

	/* Let a registered compat translator append its translation too. */
	xtr = xfrm_get_translator();
	if (xtr) {
		err = xtr->alloc_compat(skb, nlh);

		xfrm_put_translator(xtr);
		if (err) {
			nlmsg_cancel(skb, nlh);
			return err;
		}
	}

	return 0;
}
2255 
2256 static int xfrm_dump_policy_done(struct netlink_callback *cb)
2257 {
2258 	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
2259 	struct net *net = sock_net(cb->skb->sk);
2260 
2261 	xfrm_policy_walk_done(walk, net);
2262 	return 0;
2263 }
2264 
/* Dump setup: initialize an SPD walk over all policy types. */
static int xfrm_dump_policy_start(struct netlink_callback *cb)
{
	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;

	/* The walk state lives directly in cb->args; make sure it fits. */
	BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args));

	xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
	return 0;
}
2274 
/* Netlink dump callback for XFRM_MSG_GETPOLICY: resume the walker kept in
 * cb->args and emit one NLM_F_MULTI message per policy via dump_one_policy().
 */
static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
	struct xfrm_dump_info info;

	info.in_skb = cb->skb;
	info.out_skb = skb;
	info.nlmsg_seq = cb->nlh->nlmsg_seq;
	info.nlmsg_flags = NLM_F_MULTI;

	/* Return value intentionally ignored; the walk stops when the skb
	 * fills up and resumes on the next dump invocation.
	 */
	(void) xfrm_policy_walk(net, walk, dump_one_policy, &info);

	return skb->len;
}
2290 
/* Build a freshly allocated skb holding one XFRM_MSG_NEWPOLICY message that
 * describes @xp, suitable for a unicast reply to @in_skb's sender.
 * Returns the skb on success or an ERR_PTR on allocation/encoding failure;
 * ownership of the skb passes to the caller.
 */
static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
					  struct xfrm_policy *xp,
					  int dir, u32 seq)
{
	struct xfrm_dump_info info;
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	info.in_skb = in_skb;
	info.out_skb = skb;
	info.nlmsg_seq = seq;
	info.nlmsg_flags = 0;

	err = dump_one_policy(xp, dir, 0, &info);
	if (err) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2316 
/* Broadcast the current per-direction default policies (in/fwd/out) as an
 * XFRM_MSG_GETDEFAULT message to the XFRMNLGRP_POLICY multicast group.
 * Returns 0 on success or a negative errno.
 */
static int xfrm_notify_userpolicy(struct net *net)
{
	struct xfrm_userpolicy_default *up;
	int len = NLMSG_ALIGN(sizeof(*up));
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_GETDEFAULT, sizeof(*up), 0);
	if (nlh == NULL) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	up = nlmsg_data(nlh);
	up->in = net->xfrm.policy_default[XFRM_POLICY_IN];
	up->fwd = net->xfrm.policy_default[XFRM_POLICY_FWD];
	up->out = net->xfrm.policy_default[XFRM_POLICY_OUT];

	nlmsg_end(skb, nlh);

	rcu_read_lock();
	err = xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
	rcu_read_unlock();

	return err;
}
2348 
2349 static bool xfrm_userpolicy_is_valid(__u8 policy)
2350 {
2351 	return policy == XFRM_USERPOLICY_BLOCK ||
2352 	       policy == XFRM_USERPOLICY_ACCEPT;
2353 }
2354 
/* XFRM_MSG_SETDEFAULT handler: update the namespace's default policies for
 * each direction whose value in the request is BLOCK/ACCEPT (other values,
 * including XFRM_USERPOLICY_UNSPEC, leave that direction untouched), then
 * flush cached routes and notify listeners. Always returns 0.
 */
static int xfrm_set_default(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_userpolicy_default *up = nlmsg_data(nlh);

	if (xfrm_userpolicy_is_valid(up->in))
		net->xfrm.policy_default[XFRM_POLICY_IN] = up->in;

	if (xfrm_userpolicy_is_valid(up->fwd))
		net->xfrm.policy_default[XFRM_POLICY_FWD] = up->fwd;

	if (xfrm_userpolicy_is_valid(up->out))
		net->xfrm.policy_default[XFRM_POLICY_OUT] = up->out;

	/* Invalidate cached routing decisions that may depend on the
	 * previous defaults.
	 */
	rt_genid_bump_all(net);

	xfrm_notify_userpolicy(net);
	return 0;
}
2375 
/* XFRM_MSG_GETDEFAULT handler: unicast the current per-direction default
 * policies back to the requester. Returns 0 or a negative errno.
 */
static int xfrm_get_default(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct sk_buff *r_skb;
	struct nlmsghdr *r_nlh;
	struct net *net = sock_net(skb->sk);
	struct xfrm_userpolicy_default *r_up;
	int len = NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_default));
	u32 portid = NETLINK_CB(skb).portid;
	u32 seq = nlh->nlmsg_seq;

	r_skb = nlmsg_new(len, GFP_ATOMIC);
	if (!r_skb)
		return -ENOMEM;

	r_nlh = nlmsg_put(r_skb, portid, seq, XFRM_MSG_GETDEFAULT, sizeof(*r_up), 0);
	if (!r_nlh) {
		kfree_skb(r_skb);
		return -EMSGSIZE;
	}

	r_up = nlmsg_data(r_nlh);
	r_up->in = net->xfrm.policy_default[XFRM_POLICY_IN];
	r_up->fwd = net->xfrm.policy_default[XFRM_POLICY_FWD];
	r_up->out = net->xfrm.policy_default[XFRM_POLICY_OUT];
	nlmsg_end(r_skb, r_nlh);

	/* nlmsg_unicast() consumes r_skb in all cases. */
	return nlmsg_unicast(net->xfrm.nlsk, r_skb, portid);
}
2405 
/* Shared handler for XFRM_MSG_GETPOLICY and XFRM_MSG_DELPOLICY (the message
 * type selects lookup-only vs. delete). The policy is located either by
 * index or by (selector, security context); on GET the policy is unicast
 * back to the requester, on DEL an audit record and a km notification are
 * emitted. Returns 0 on success, -ENOENT if no policy matched, or another
 * negative errno.
 */
static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs,
			   struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_userpolicy_id *p;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;
	struct km_event c;
	int delete;
	struct xfrm_mark m;
	u32 if_id = 0;

	p = nlmsg_data(nlh);
	delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;

	err = copy_from_user_policy_type(&type, attrs, extack);
	if (err)
		return err;

	err = verify_policy_dir(p->dir, extack);
	if (err)
		return err;

	if (attrs[XFRMA_IF_ID])
		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	xfrm_mark_get(attrs, &m);

	/* Lookup by index takes precedence over selector/context lookup. */
	if (p->index)
		xp = xfrm_policy_byid(net, &m, if_id, type, p->dir,
				      p->index, delete, &err);
	else {
		struct nlattr *rt = attrs[XFRMA_SEC_CTX];
		struct xfrm_sec_ctx *ctx;

		err = verify_sec_ctx_len(attrs, extack);
		if (err)
			return err;

		ctx = NULL;
		if (rt) {
			struct xfrm_user_sec_ctx *uctx = nla_data(rt);

			err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
			if (err)
				return err;
		}
		xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
					   &p->sel, ctx, delete, &err);
		/* ctx was only needed for the lookup comparison. */
		security_xfrm_policy_free(ctx);
	}
	if (xp == NULL)
		return -ENOENT;

	if (!delete) {
		struct sk_buff *resp_skb;

		resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
		if (IS_ERR(resp_skb)) {
			err = PTR_ERR(resp_skb);
		} else {
			err = nlmsg_unicast(net->xfrm.nlsk, resp_skb,
					    NETLINK_CB(skb).portid);
		}
	} else {
		/* err reflects the outcome of the delete performed by the
		 * lookup above; audit both success and failure.
		 */
		xfrm_audit_policy_delete(xp, err ? 0 : 1, true);

		if (err != 0)
			goto out;

		c.data.byid = p->index;
		c.event = nlh->nlmsg_type;
		c.seq = nlh->nlmsg_seq;
		c.portid = nlh->nlmsg_pid;
		km_policy_notify(xp, p->dir, &c);
	}

out:
	xfrm_pol_put(xp);
	return err;
}
2489 
/* XFRM_MSG_FLUSHSA handler: delete all SAs of the given protocol (or all
 * protocols) in this namespace and broadcast a flush notification.
 * An already-empty state table (-ESRCH) is treated as success.
 */
static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct nlattr **attrs,
			 struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct km_event c;
	struct xfrm_usersa_flush *p = nlmsg_data(nlh);
	int err;

	err = xfrm_state_flush(net, p->proto, true, false);
	if (err) {
		if (err == -ESRCH) /* empty table */
			return 0;
		return err;
	}
	c.data.proto = p->proto;
	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.net = net;
	km_state_notify(NULL, &c);

	return 0;
}
2514 
2515 static inline unsigned int xfrm_aevent_msgsize(struct xfrm_state *x)
2516 {
2517 	unsigned int replay_size = x->replay_esn ?
2518 			      xfrm_replay_state_esn_len(x->replay_esn) :
2519 			      sizeof(struct xfrm_replay_state);
2520 
2521 	return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
2522 	       + nla_total_size(replay_size)
2523 	       + nla_total_size_64bit(sizeof(struct xfrm_lifetime_cur))
2524 	       + nla_total_size(sizeof(struct xfrm_mark))
2525 	       + nla_total_size(4) /* XFRM_AE_RTHR */
2526 	       + nla_total_size(4) /* XFRM_AE_ETHR */
2527 	       + nla_total_size(sizeof(x->dir)); /* XFRMA_SA_DIR */
2528 }
2529 
/* Encode an XFRM_MSG_NEWAE message for state @x into @skb: the aevent id,
 * the replay state (ESN or legacy), current lifetime, and — depending on
 * c->data.aevent flags / state — replay thresholds, mark, if_id and SA
 * direction. Returns 0 or a negative errno with the message cancelled.
 */
static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
{
	struct xfrm_aevent_id *id;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	id = nlmsg_data(nlh);
	/* Zero first: sa_id may contain padding that must not leak. */
	memset(&id->sa_id, 0, sizeof(id->sa_id));
	memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr));
	id->sa_id.spi = x->id.spi;
	id->sa_id.family = x->props.family;
	id->sa_id.proto = x->id.proto;
	memcpy(&id->saddr, &x->props.saddr, sizeof(x->props.saddr));
	id->reqid = x->props.reqid;
	id->flags = c->data.aevent;

	/* ESN-capable states carry the variable-length ESN attribute
	 * instead of the legacy fixed-size replay state.
	 */
	if (x->replay_esn) {
		err = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
			      xfrm_replay_state_esn_len(x->replay_esn),
			      x->replay_esn);
	} else {
		err = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
			      &x->replay);
	}
	if (err)
		goto out_cancel;
	err = nla_put_64bit(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft,
			    XFRMA_PAD);
	if (err)
		goto out_cancel;

	if (id->flags & XFRM_AE_RTHR) {
		err = nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
		if (err)
			goto out_cancel;
	}
	if (id->flags & XFRM_AE_ETHR) {
		/* replay_maxage is in jiffies; report it in 100ms units. */
		err = nla_put_u32(skb, XFRMA_ETIMER_THRESH,
				  x->replay_maxage * 10 / HZ);
		if (err)
			goto out_cancel;
	}
	err = xfrm_mark_put(skb, &x->mark);
	if (err)
		goto out_cancel;

	err = xfrm_if_id_put(skb, x->if_id);
	if (err)
		goto out_cancel;

	if (x->dir) {
		err = nla_put_u8(skb, XFRMA_SA_DIR, x->dir);
		if (err)
			goto out_cancel;
	}

	nlmsg_end(skb, nlh);
	return 0;

out_cancel:
	nlmsg_cancel(skb, nlh);
	return err;
}
2597 
/* XFRM_MSG_GETAE handler: look up the SA by (mark, daddr, spi, proto,
 * family) and unicast its aevent data (replay state, lifetime, optional
 * thresholds) back to the requester.
 */
static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct sk_buff *r_skb;
	int err;
	struct km_event c;
	u32 mark;
	struct xfrm_mark m;
	struct xfrm_aevent_id *p = nlmsg_data(nlh);
	struct xfrm_usersa_id *id = &p->sa_id;

	mark = xfrm_mark_get(attrs, &m);

	x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family);
	if (x == NULL)
		return -ESRCH;

	r_skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
	if (r_skb == NULL) {
		xfrm_state_put(x);
		return -ENOMEM;
	}

	/*
	 * XXX: is this lock really needed - none of the other
	 * gets lock (the concern is things getting updated
	 * while we are still reading) - jhs
	*/
	spin_lock_bh(&x->lock);
	c.data.aevent = p->flags;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;

	/* r_skb was sized by xfrm_aevent_msgsize(), so encoding must fit. */
	err = build_aevent(r_skb, x, &c);
	BUG_ON(err < 0);

	err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).portid);
	spin_unlock_bh(&x->lock);
	xfrm_state_put(x);
	return err;
}
2641 
/* XFRM_MSG_NEWAE handler: update an SA's aevent parameters (replay state,
 * lifetime, timer/replay thresholds). Requires at least one relevant
 * attribute and the NLM_F_REPLACE flag, and the SA must be VALID.
 * On success a XFRM_AE_CU notification is sent to key managers.
 */
static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct km_event c;
	int err = -EINVAL;
	u32 mark = 0;
	struct xfrm_mark m;
	struct xfrm_aevent_id *p = nlmsg_data(nlh);
	struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
	struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
	struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
	struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
	struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];

	if (!lt && !rp && !re && !et && !rt) {
		NL_SET_ERR_MSG(extack, "Missing required attribute for AE");
		return err;
	}

	/* pedantic mode - thou shalt sayeth replaceth */
	if (!(nlh->nlmsg_flags & NLM_F_REPLACE)) {
		NL_SET_ERR_MSG(extack, "NLM_F_REPLACE flag is required");
		return err;
	}

	mark = xfrm_mark_get(attrs, &m);

	x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
	if (x == NULL)
		return -ESRCH;

	if (x->km.state != XFRM_STATE_VALID) {
		NL_SET_ERR_MSG(extack, "SA must be in VALID state");
		goto out;
	}

	/* Reject an ESN attribute whose size does not match the SA. */
	err = xfrm_replay_verify_len(x->replay_esn, re, extack);
	if (err)
		goto out;

	spin_lock_bh(&x->lock);
	xfrm_update_ae_params(x, attrs, 1);
	spin_unlock_bh(&x->lock);

	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.data.aevent = XFRM_AE_CU;
	km_state_notify(x, &c);
	err = 0;
out:
	xfrm_state_put(x);
	return err;
}
2698 
/* XFRM_MSG_FLUSHPOLICY handler: delete all policies of the given type in
 * this namespace and broadcast a flush notification. An already-empty
 * policy table (-ESRCH) is treated as success.
 */
static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct nlattr **attrs,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct km_event c;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;

	err = copy_from_user_policy_type(&type, attrs, extack);
	if (err)
		return err;

	err = xfrm_policy_flush(net, type, true);
	if (err) {
		if (err == -ESRCH) /* empty table */
			return 0;
		return err;
	}

	c.data.type = type;
	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.net = net;
	km_policy_notify(NULL, 0, &c);
	return 0;
}
2727 
/* XFRM_MSG_POLEXPIRE handler: locate the policy (by index or by
 * selector/context), notify key managers of its expiry and, on a hard
 * expire, delete and audit it. Returns 0, -ENOENT when no policy matched,
 * or another negative errno.
 */
static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct nlattr **attrs,
			       struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_user_polexpire *up = nlmsg_data(nlh);
	struct xfrm_userpolicy_info *p = &up->pol;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err = -ENOENT;
	struct xfrm_mark m;
	u32 if_id = 0;

	err = copy_from_user_policy_type(&type, attrs, extack);
	if (err)
		return err;

	err = verify_policy_dir(p->dir, extack);
	if (err)
		return err;

	if (attrs[XFRMA_IF_ID])
		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	xfrm_mark_get(attrs, &m);

	/* Lookup by index takes precedence; delete=0 since only a hard
	 * expire removes the policy, below.
	 */
	if (p->index)
		xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, p->index,
				      0, &err);
	else {
		struct nlattr *rt = attrs[XFRMA_SEC_CTX];
		struct xfrm_sec_ctx *ctx;

		err = verify_sec_ctx_len(attrs, extack);
		if (err)
			return err;

		ctx = NULL;
		if (rt) {
			struct xfrm_user_sec_ctx *uctx = nla_data(rt);

			err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
			if (err)
				return err;
		}
		xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
					   &p->sel, ctx, 0, &err);
		security_xfrm_policy_free(ctx);
	}
	if (xp == NULL)
		return -ENOENT;

	/* A dying policy must not be expired again. */
	if (unlikely(xp->walk.dead))
		goto out;

	err = 0;
	if (up->hard) {
		xfrm_policy_delete(xp, p->dir);
		xfrm_audit_policy_delete(xp, 1, true);
	}
	km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);

out:
	xfrm_pol_put(xp);
	return err;
}
2794 
/* XFRM_MSG_EXPIRE handler: expire the matching SA. Notifies key managers
 * and, on a hard expire, deletes and audits the state. The SA must be in
 * VALID state. Returns 0, -ENOENT if not found, or -EINVAL.
 */
static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct nlattr **attrs,
			      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	int err;
	struct xfrm_user_expire *ue = nlmsg_data(nlh);
	struct xfrm_usersa_info *p = &ue->state;
	struct xfrm_mark m;
	u32 mark = xfrm_mark_get(attrs, &m);

	x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);

	err = -ENOENT;
	if (x == NULL)
		return err;

	/* Hold x->lock across the state check and (possible) delete. */
	spin_lock_bh(&x->lock);
	err = -EINVAL;
	if (x->km.state != XFRM_STATE_VALID) {
		NL_SET_ERR_MSG(extack, "SA must be in VALID state");
		goto out;
	}

	km_state_expired(x, ue->hard, nlh->nlmsg_pid);

	if (ue->hard) {
		__xfrm_state_delete(x);
		xfrm_audit_state_delete(x, 1, true);
	}
	err = 0;
out:
	spin_unlock_bh(&x->lock);
	xfrm_state_put(x);
	return err;
}
2832 
/* XFRM_MSG_ACQUIRE handler: build a temporary state and policy from the
 * user-supplied acquire message and issue km_query() for every template,
 * asking key managers to negotiate matching SAs. The temporary objects are
 * freed before returning.
 */
static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs,
			    struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_user_tmpl *ut;
	int i;
	struct nlattr *rt = attrs[XFRMA_TMPL];
	struct xfrm_mark mark;

	struct xfrm_user_acquire *ua = nlmsg_data(nlh);
	struct xfrm_state *x = xfrm_state_alloc(net);
	int err = -ENOMEM;

	if (!x)
		goto nomem;

	xfrm_mark_get(attrs, &mark);

	err = verify_newpolicy_info(&ua->policy, extack);
	if (err)
		goto free_state;
	err = verify_sec_ctx_len(attrs, extack);
	if (err)
		goto free_state;

	/*   build an XP */
	xp = xfrm_policy_construct(net, &ua->policy, attrs, &err, extack);
	if (!xp)
		goto free_state;

	memcpy(&x->id, &ua->id, sizeof(ua->id));
	memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
	memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
	xp->mark.m = x->mark.m = mark.m;
	xp->mark.v = x->mark.v = mark.v;
	ut = nla_data(rt);
	/* extract the templates and for each call km_key */
	for (i = 0; i < xp->xfrm_nr; i++, ut++) {
		struct xfrm_tmpl *t = &xp->xfrm_vec[i];
		/* Specialize the temporary state for this template. */
		memcpy(&x->id, &t->id, sizeof(x->id));
		x->props.mode = t->mode;
		x->props.reqid = t->reqid;
		x->props.family = ut->family;
		t->aalgos = ua->aalgos;
		t->ealgos = ua->ealgos;
		t->calgos = ua->calgos;
		err = km_query(x, t, xp);

	}

	/* The state and policy were only vehicles for km_query(). */
	xfrm_state_free(x);
	kfree(xp);

	return 0;

free_state:
	xfrm_state_free(x);
nomem:
	return err;
}
2895 
2896 #ifdef CONFIG_XFRM_MIGRATE
/* Decode the XFRMA_MIGRATE (and optional XFRMA_KMADDRESS) attributes into
 * the caller's arrays. @ma must have room for XFRM_MAX_DEPTH entries;
 * @k may be NULL when no kmaddress was supplied. On success *num holds the
 * number of migrate entries copied. The caller guarantees XFRMA_MIGRATE is
 * present.
 */
static int copy_from_user_migrate(struct xfrm_migrate *ma,
				  struct xfrm_kmaddress *k,
				  struct nlattr **attrs, int *num,
				  struct netlink_ext_ack *extack)
{
	struct nlattr *rt = attrs[XFRMA_MIGRATE];
	struct xfrm_user_migrate *um;
	int i, num_migrate;

	if (k != NULL) {
		struct xfrm_user_kmaddress *uk;

		uk = nla_data(attrs[XFRMA_KMADDRESS]);
		memcpy(&k->local, &uk->local, sizeof(k->local));
		memcpy(&k->remote, &uk->remote, sizeof(k->remote));
		k->family = uk->family;
		k->reserved = uk->reserved;
	}

	um = nla_data(rt);
	num_migrate = nla_len(rt) / sizeof(*um);

	if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH) {
		NL_SET_ERR_MSG(extack, "Invalid number of SAs to migrate, must be 0 < num <= XFRM_MAX_DEPTH (6)");
		return -EINVAL;
	}

	for (i = 0; i < num_migrate; i++, um++, ma++) {
		memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
		memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
		memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
		memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));

		ma->proto = um->proto;
		ma->mode = um->mode;
		ma->reqid = um->reqid;

		ma->old_family = um->old_family;
		ma->new_family = um->new_family;
	}

	*num = i;
	return 0;
}
2941 
/* XFRM_MSG_MIGRATE handler (CONFIG_XFRM_MIGRATE): parse the migrate list,
 * optional kmaddress and encap template from the request and hand the
 * whole migration off to xfrm_migrate().
 */
static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	struct xfrm_userpolicy_id *pi = nlmsg_data(nlh);
	struct xfrm_migrate m[XFRM_MAX_DEPTH];
	struct xfrm_kmaddress km, *kmp;
	u8 type;
	int err;
	int n = 0;
	struct net *net = sock_net(skb->sk);
	struct xfrm_encap_tmpl  *encap = NULL;
	u32 if_id = 0;

	if (!attrs[XFRMA_MIGRATE]) {
		NL_SET_ERR_MSG(extack, "Missing required MIGRATE attribute");
		return -EINVAL;
	}

	kmp = attrs[XFRMA_KMADDRESS] ? &km : NULL;

	err = copy_from_user_policy_type(&type, attrs, extack);
	if (err)
		return err;

	err = copy_from_user_migrate(m, kmp, attrs, &n, extack);
	if (err)
		return err;

	if (!n)
		return 0;

	/* Copy the encap template so it outlives the netlink message. */
	if (attrs[XFRMA_ENCAP]) {
		encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
				sizeof(*encap), GFP_KERNEL);
		if (!encap)
			return -ENOMEM;
	}

	if (attrs[XFRMA_IF_ID])
		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	err = xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net, encap,
			   if_id, extack);

	kfree(encap);

	return err;
}
2990 #else
/* CONFIG_XFRM_MIGRATE disabled: reject XFRM_MSG_MIGRATE requests. */
static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs, struct netlink_ext_ack *extack)
{
	return -ENOPROTOOPT;
}
2996 #endif
2997 
2998 #ifdef CONFIG_XFRM_MIGRATE
2999 static int copy_to_user_migrate(const struct xfrm_migrate *m, struct sk_buff *skb)
3000 {
3001 	struct xfrm_user_migrate um;
3002 
3003 	memset(&um, 0, sizeof(um));
3004 	um.proto = m->proto;
3005 	um.mode = m->mode;
3006 	um.reqid = m->reqid;
3007 	um.old_family = m->old_family;
3008 	memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
3009 	memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
3010 	um.new_family = m->new_family;
3011 	memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
3012 	memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));
3013 
3014 	return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
3015 }
3016 
3017 static int copy_to_user_kmaddress(const struct xfrm_kmaddress *k, struct sk_buff *skb)
3018 {
3019 	struct xfrm_user_kmaddress uk;
3020 
3021 	memset(&uk, 0, sizeof(uk));
3022 	uk.family = k->family;
3023 	uk.reserved = k->reserved;
3024 	memcpy(&uk.local, &k->local, sizeof(uk.local));
3025 	memcpy(&uk.remote, &k->remote, sizeof(uk.remote));
3026 
3027 	return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk);
3028 }
3029 
3030 static inline unsigned int xfrm_migrate_msgsize(int num_migrate, int with_kma,
3031 						int with_encp)
3032 {
3033 	return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id))
3034 	      + (with_kma ? nla_total_size(sizeof(struct xfrm_kmaddress)) : 0)
3035 	      + (with_encp ? nla_total_size(sizeof(struct xfrm_encap_tmpl)) : 0)
3036 	      + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate)
3037 	      + userpolicy_type_attrsize();
3038 }
3039 
/* Encode an XFRM_MSG_MIGRATE message into @skb: the policy id built from
 * @sel/@dir, then optional kmaddress and encap attributes, the policy type
 * and one XFRMA_MIGRATE attribute per entry in @m. Returns 0 or a negative
 * errno with the message cancelled.
 */
static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
			 int num_migrate, const struct xfrm_kmaddress *k,
			 const struct xfrm_selector *sel,
			 const struct xfrm_encap_tmpl *encap, u8 dir, u8 type)
{
	const struct xfrm_migrate *mp;
	struct xfrm_userpolicy_id *pol_id;
	struct nlmsghdr *nlh;
	int i, err;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	pol_id = nlmsg_data(nlh);
	/* copy data from selector, dir, and type to the pol_id */
	memset(pol_id, 0, sizeof(*pol_id));
	memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
	pol_id->dir = dir;

	if (k != NULL) {
		err = copy_to_user_kmaddress(k, skb);
		if (err)
			goto out_cancel;
	}
	if (encap) {
		err = nla_put(skb, XFRMA_ENCAP, sizeof(*encap), encap);
		if (err)
			goto out_cancel;
	}
	err = copy_to_user_policy_type(type, skb);
	if (err)
		goto out_cancel;
	for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
		err = copy_to_user_migrate(mp, skb);
		if (err)
			goto out_cancel;
	}

	nlmsg_end(skb, nlh);
	return 0;

out_cancel:
	nlmsg_cancel(skb, nlh);
	return err;
}
3086 
/* km_migrate callback (CONFIG_XFRM_MIGRATE): broadcast an
 * XFRM_MSG_MIGRATE notification to the XFRMNLGRP_MIGRATE group.
 */
static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
			     const struct xfrm_migrate *m, int num_migrate,
			     const struct xfrm_kmaddress *k,
			     const struct xfrm_encap_tmpl *encap)
{
	struct net *net = &init_net;
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k, !!encap),
			GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	/* build migrate */
	/* skb was sized by xfrm_migrate_msgsize(), so encoding must fit. */
	err = build_migrate(skb, m, num_migrate, k, sel, encap, dir, type);
	BUG_ON(err < 0);

	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MIGRATE);
}
3107 #else
/* CONFIG_XFRM_MIGRATE disabled: migration notifications are unsupported. */
static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
			     const struct xfrm_migrate *m, int num_migrate,
			     const struct xfrm_kmaddress *k,
			     const struct xfrm_encap_tmpl *encap)
{
	return -ENOPROTOOPT;
}
3115 #endif
3116 
#define XMSGSIZE(type) sizeof(struct type)

/* Minimum fixed-header length, per message type, that xfrm_user_rcv_msg()
 * passes to nlmsg_parse_deprecated(); attributes follow this header.
 */
const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
	[XFRM_MSG_NEWSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
	[XFRM_MSG_DELSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
	[XFRM_MSG_GETSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
	[XFRM_MSG_NEWPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
	[XFRM_MSG_DELPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_GETPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_ALLOCSPI    - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
	[XFRM_MSG_ACQUIRE     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
	[XFRM_MSG_EXPIRE      - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
	[XFRM_MSG_UPDPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
	[XFRM_MSG_UPDSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
	[XFRM_MSG_POLEXPIRE   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
	[XFRM_MSG_FLUSHSA     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
	[XFRM_MSG_NEWAE       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
	[XFRM_MSG_GETAE       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
	[XFRM_MSG_REPORT      - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
	[XFRM_MSG_MIGRATE     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_GETSADINFO  - XFRM_MSG_BASE] = sizeof(u32),
	[XFRM_MSG_NEWSPDINFO  - XFRM_MSG_BASE] = sizeof(u32),
	[XFRM_MSG_GETSPDINFO  - XFRM_MSG_BASE] = sizeof(u32),
	[XFRM_MSG_SETDEFAULT  - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_default),
	[XFRM_MSG_GETDEFAULT  - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_default),
};
EXPORT_SYMBOL_GPL(xfrm_msg_min);

#undef XMSGSIZE
3147 
/* Netlink attribute validation policy for the generic XFRMA_* attribute
 * set. Strict validation starts at XFRMA_SA_DIR; older attributes keep
 * the historical lenient (deprecated) parsing for compatibility.
 */
const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
	[XFRMA_UNSPEC]		= { .strict_start_type = XFRMA_SA_DIR },
	[XFRMA_SA]		= { .len = sizeof(struct xfrm_usersa_info)},
	[XFRMA_POLICY]		= { .len = sizeof(struct xfrm_userpolicy_info)},
	[XFRMA_LASTUSED]	= { .type = NLA_U64},
	[XFRMA_ALG_AUTH_TRUNC]	= { .len = sizeof(struct xfrm_algo_auth)},
	[XFRMA_ALG_AEAD]	= { .len = sizeof(struct xfrm_algo_aead) },
	[XFRMA_ALG_AUTH]	= { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ALG_CRYPT]	= { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ALG_COMP]	= { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ENCAP]		= { .len = sizeof(struct xfrm_encap_tmpl) },
	[XFRMA_TMPL]		= { .len = sizeof(struct xfrm_user_tmpl) },
	[XFRMA_SEC_CTX]		= { .len = sizeof(struct xfrm_user_sec_ctx) },
	[XFRMA_LTIME_VAL]	= { .len = sizeof(struct xfrm_lifetime_cur) },
	[XFRMA_REPLAY_VAL]	= { .len = sizeof(struct xfrm_replay_state) },
	[XFRMA_REPLAY_THRESH]	= { .type = NLA_U32 },
	[XFRMA_ETIMER_THRESH]	= { .type = NLA_U32 },
	[XFRMA_SRCADDR]		= { .len = sizeof(xfrm_address_t) },
	[XFRMA_COADDR]		= { .len = sizeof(xfrm_address_t) },
	[XFRMA_POLICY_TYPE]	= { .len = sizeof(struct xfrm_userpolicy_type)},
	[XFRMA_MIGRATE]		= { .len = sizeof(struct xfrm_user_migrate) },
	[XFRMA_KMADDRESS]	= { .len = sizeof(struct xfrm_user_kmaddress) },
	[XFRMA_MARK]		= { .len = sizeof(struct xfrm_mark) },
	[XFRMA_TFCPAD]		= { .type = NLA_U32 },
	[XFRMA_REPLAY_ESN_VAL]	= { .len = sizeof(struct xfrm_replay_state_esn) },
	[XFRMA_SA_EXTRA_FLAGS]	= { .type = NLA_U32 },
	[XFRMA_PROTO]		= { .type = NLA_U8 },
	[XFRMA_ADDRESS_FILTER]	= { .len = sizeof(struct xfrm_address_filter) },
	[XFRMA_OFFLOAD_DEV]	= { .len = sizeof(struct xfrm_user_offload) },
	[XFRMA_SET_MARK]	= { .type = NLA_U32 },
	[XFRMA_SET_MARK_MASK]	= { .type = NLA_U32 },
	[XFRMA_IF_ID]		= { .type = NLA_U32 },
	[XFRMA_MTIMER_THRESH]   = { .type = NLA_U32 },
	[XFRMA_SA_DIR]          = NLA_POLICY_RANGE(NLA_U8, XFRM_SA_DIR_IN, XFRM_SA_DIR_OUT),
	[XFRMA_NAT_KEEPALIVE_INTERVAL] = { .type = NLA_U32 },
};
EXPORT_SYMBOL_GPL(xfrma_policy);
3185 
/* Attribute policy for XFRM_MSG_NEWSPDINFO (SPD hash threshold tuning). */
static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
	[XFRMA_SPD_IPV4_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) },
	[XFRMA_SPD_IPV6_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) },
};
3190 
/* Per-message-type dispatch table, indexed by (type - XFRM_MSG_BASE).
 * Each entry supplies the doit handler and, for dumpable types, the
 * start/dump/done callbacks; nla_pol/nla_max override the default
 * xfrma_policy for types with their own attribute namespace.
 */
static const struct xfrm_link {
	int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **,
		    struct netlink_ext_ack *);
	int (*start)(struct netlink_callback *);
	int (*dump)(struct sk_buff *, struct netlink_callback *);
	int (*done)(struct netlink_callback *);
	const struct nla_policy *nla_pol;
	int nla_max;
} xfrm_dispatch[XFRM_NR_MSGTYPES] = {
	[XFRM_MSG_NEWSA       - XFRM_MSG_BASE] = { .doit = xfrm_add_sa        },
	[XFRM_MSG_DELSA       - XFRM_MSG_BASE] = { .doit = xfrm_del_sa        },
	[XFRM_MSG_GETSA       - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
						   .dump = xfrm_dump_sa,
						   .done = xfrm_dump_sa_done  },
	[XFRM_MSG_NEWPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_add_policy    },
	[XFRM_MSG_DELPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy    },
	[XFRM_MSG_GETPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
						   .start = xfrm_dump_policy_start,
						   .dump = xfrm_dump_policy,
						   .done = xfrm_dump_policy_done },
	[XFRM_MSG_ALLOCSPI    - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
	[XFRM_MSG_ACQUIRE     - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire   },
	[XFRM_MSG_EXPIRE      - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
	[XFRM_MSG_UPDPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_add_policy    },
	[XFRM_MSG_UPDSA       - XFRM_MSG_BASE] = { .doit = xfrm_add_sa        },
	[XFRM_MSG_POLEXPIRE   - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
	[XFRM_MSG_FLUSHSA     - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa      },
	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy  },
	[XFRM_MSG_NEWAE       - XFRM_MSG_BASE] = { .doit = xfrm_new_ae  },
	[XFRM_MSG_GETAE       - XFRM_MSG_BASE] = { .doit = xfrm_get_ae  },
	[XFRM_MSG_MIGRATE     - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate    },
	[XFRM_MSG_GETSADINFO  - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo   },
	[XFRM_MSG_NEWSPDINFO  - XFRM_MSG_BASE] = { .doit = xfrm_set_spdinfo,
						   .nla_pol = xfrma_spd_policy,
						   .nla_max = XFRMA_SPD_MAX },
	[XFRM_MSG_GETSPDINFO  - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo   },
	[XFRM_MSG_SETDEFAULT  - XFRM_MSG_BASE] = { .doit = xfrm_set_default   },
	[XFRM_MSG_GETDEFAULT  - XFRM_MSG_BASE] = { .doit = xfrm_get_default   },
};
3230 
3231 static int xfrm_reject_unused_attr(int type, struct nlattr **attrs,
3232 				   struct netlink_ext_ack *extack)
3233 {
3234 	if (attrs[XFRMA_SA_DIR]) {
3235 		switch (type) {
3236 		case XFRM_MSG_NEWSA:
3237 		case XFRM_MSG_UPDSA:
3238 		case XFRM_MSG_ALLOCSPI:
3239 			break;
3240 		default:
3241 			NL_SET_ERR_MSG(extack, "Invalid attribute SA_DIR");
3242 			return -EINVAL;
3243 		}
3244 	}
3245 
3246 	return 0;
3247 }
3248 
/* Central receive handler for one xfrm netlink message: validates the
 * type, checks privileges, optionally translates a 32-bit compat layout,
 * starts dumps for GETSA/GETPOLICY with NLM_F_DUMP, parses attributes
 * and dispatches to the per-type ->doit() handler from xfrm_dispatch[].
 */
static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *attrs[XFRMA_MAX+1];
	const struct xfrm_link *link;
	struct nlmsghdr *nlh64 = NULL;
	int type, err;

	type = nlh->nlmsg_type;
	if (type > XFRM_MSG_MAX)
		return -EINVAL;

	type -= XFRM_MSG_BASE;
	link = &xfrm_dispatch[type];

	/* All operations require privileges, even GET */
	if (!netlink_net_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	/* 32-bit tasks on a 64-bit kernel use a different message layout;
	 * translate to the native layout first (nlh64 is kvmalloc'ed by
	 * the translator and freed below).
	 */
	if (in_compat_syscall()) {
		struct xfrm_translator *xtr = xfrm_get_translator();

		if (!xtr)
			return -EOPNOTSUPP;

		nlh64 = xtr->rcv_msg_compat(nlh, link->nla_max,
					    link->nla_pol, extack);
		xfrm_put_translator(xtr);
		if (IS_ERR(nlh64))
			return PTR_ERR(nlh64);
		if (nlh64)
			nlh = nlh64;
	}

	/* Dump requests bypass ->doit() and go through netlink_dump_start. */
	if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
	     type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
	    (nlh->nlmsg_flags & NLM_F_DUMP)) {
		struct netlink_dump_control c = {
			.start = link->start,
			.dump = link->dump,
			.done = link->done,
		};

		if (link->dump == NULL) {
			err = -EINVAL;
			goto err;
		}

		err = netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c);
		goto err;
	}

	/* Use the per-type policy/maxattr if the link overrides them. */
	err = nlmsg_parse_deprecated(nlh, xfrm_msg_min[type], attrs,
				     link->nla_max ? : XFRMA_MAX,
				     link->nla_pol ? : xfrma_policy, extack);
	if (err < 0)
		goto err;

	/* Only the default policy covers XFRMA_SA_DIR; screen it here. */
	if (!link->nla_pol || link->nla_pol == xfrma_policy) {
		err = xfrm_reject_unused_attr((type + XFRM_MSG_BASE), attrs, extack);
		if (err < 0)
			goto err;
	}

	if (link->doit == NULL) {
		err = -EINVAL;
		goto err;
	}

	err = link->doit(skb, nlh, attrs, extack);

	/* We need to free skb allocated in xfrm_alloc_compat() before
	 * returning from this function, because consume_skb() won't take
	 * care of frag_list since netlink destructor sets
	 * skb->head to NULL. (see netlink_skb_destructor())
	 */
	if (skb_has_frag_list(skb)) {
		kfree_skb(skb_shinfo(skb)->frag_list);
		skb_shinfo(skb)->frag_list = NULL;
	}

err:
	kvfree(nlh64);
	return err;
}
3335 
/* Netlink input callback; serializes all xfrm configuration changes
 * within a netns under xfrm_cfg_mutex.
 */
static void xfrm_netlink_rcv(struct sk_buff *skb)
{
	struct net *net = sock_net(skb->sk);

	mutex_lock(&net->xfrm.xfrm_cfg_mutex);
	netlink_rcv_skb(skb, &xfrm_user_rcv_msg);
	mutex_unlock(&net->xfrm.xfrm_cfg_mutex);
}
3344 
3345 static inline unsigned int xfrm_expire_msgsize(void)
3346 {
3347 	return NLMSG_ALIGN(sizeof(struct xfrm_user_expire)) +
3348 	       nla_total_size(sizeof(struct xfrm_mark)) +
3349 	       nla_total_size(sizeof_field(struct xfrm_state, dir));
3350 }
3351 
/* Fill an XFRM_MSG_EXPIRE message for state @x into @skb.
 * Returns 0 on success or a negative error (typically -EMSGSIZE) if the
 * skb is too small; the caller owns the skb either way.
 */
static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
{
	struct xfrm_user_expire *ue;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ue = nlmsg_data(nlh);
	copy_to_user_state(x, &ue->state);
	ue->hard = (c->data.hard != 0) ? 1 : 0;
	/* clear the padding bytes */
	memset_after(ue, 0, hard);

	/* Optional attributes: mark, interface id, SA direction. */
	err = xfrm_mark_put(skb, &x->mark);
	if (err)
		return err;

	err = xfrm_if_id_put(skb, x->if_id);
	if (err)
		return err;

	if (x->dir) {
		err = nla_put_u8(skb, XFRMA_SA_DIR, x->dir);
		if (err)
			return err;
	}

	nlmsg_end(skb, nlh);
	return 0;
}
3385 
3386 static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
3387 {
3388 	struct net *net = xs_net(x);
3389 	struct sk_buff *skb;
3390 
3391 	skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
3392 	if (skb == NULL)
3393 		return -ENOMEM;
3394 
3395 	if (build_expire(skb, x, c) < 0) {
3396 		kfree_skb(skb);
3397 		return -EMSGSIZE;
3398 	}
3399 
3400 	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
3401 }
3402 
3403 static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c)
3404 {
3405 	struct net *net = xs_net(x);
3406 	struct sk_buff *skb;
3407 	int err;
3408 
3409 	skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
3410 	if (skb == NULL)
3411 		return -ENOMEM;
3412 
3413 	err = build_aevent(skb, x, c);
3414 	BUG_ON(err < 0);
3415 
3416 	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_AEVENTS);
3417 }
3418 
/* Broadcast an XFRM_MSG_FLUSHSA notification after an SA flush; the
 * payload is just the protocol that was flushed.
 */
static int xfrm_notify_sa_flush(const struct km_event *c)
{
	struct net *net = c->net;
	struct xfrm_usersa_flush *p;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush));

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
	if (nlh == NULL) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	p = nlmsg_data(nlh);
	p->proto = c->data.proto;

	nlmsg_end(skb, nlh);

	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);
}
3444 
3445 static inline unsigned int xfrm_sa_len(struct xfrm_state *x)
3446 {
3447 	unsigned int l = 0;
3448 	if (x->aead)
3449 		l += nla_total_size(aead_len(x->aead));
3450 	if (x->aalg) {
3451 		l += nla_total_size(sizeof(struct xfrm_algo) +
3452 				    (x->aalg->alg_key_len + 7) / 8);
3453 		l += nla_total_size(xfrm_alg_auth_len(x->aalg));
3454 	}
3455 	if (x->ealg)
3456 		l += nla_total_size(xfrm_alg_len(x->ealg));
3457 	if (x->calg)
3458 		l += nla_total_size(sizeof(*x->calg));
3459 	if (x->encap)
3460 		l += nla_total_size(sizeof(*x->encap));
3461 	if (x->tfcpad)
3462 		l += nla_total_size(sizeof(x->tfcpad));
3463 	if (x->replay_esn)
3464 		l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn));
3465 	else
3466 		l += nla_total_size(sizeof(struct xfrm_replay_state));
3467 	if (x->security)
3468 		l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
3469 				    x->security->ctx_len);
3470 	if (x->coaddr)
3471 		l += nla_total_size(sizeof(*x->coaddr));
3472 	if (x->props.extra_flags)
3473 		l += nla_total_size(sizeof(x->props.extra_flags));
3474 	if (x->xso.dev)
3475 		 l += nla_total_size(sizeof(struct xfrm_user_offload));
3476 	if (x->props.smark.v | x->props.smark.m) {
3477 		l += nla_total_size(sizeof(x->props.smark.v));
3478 		l += nla_total_size(sizeof(x->props.smark.m));
3479 	}
3480 	if (x->if_id)
3481 		l += nla_total_size(sizeof(x->if_id));
3482 
3483 	/* Must count x->lastused as it may become non-zero behind our back. */
3484 	l += nla_total_size_64bit(sizeof(u64));
3485 
3486 	if (x->mapping_maxage)
3487 		l += nla_total_size(sizeof(x->mapping_maxage));
3488 
3489 	if (x->dir)
3490 		l += nla_total_size(sizeof(x->dir));
3491 
3492 	if (x->nat_keepalive_interval)
3493 		l += nla_total_size(sizeof(x->nat_keepalive_interval));
3494 
3495 	return l;
3496 }
3497 
/* Broadcast NEWSA/UPDSA/DELSA.  For DELSA the message header is a
 * compact xfrm_usersa_id and the full state is nested in an XFRMA_SA
 * attribute; for NEW/UPD the full xfrm_usersa_info is the header.
 */
static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
{
	struct net *net = xs_net(x);
	struct xfrm_usersa_info *p;
	struct xfrm_usersa_id *id;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	unsigned int len = xfrm_sa_len(x);
	unsigned int headlen;
	int err;

	headlen = sizeof(*p);
	if (c->event == XFRM_MSG_DELSA) {
		/* Room for the nested XFRMA_SA plus XFRMA_MARK. */
		len += nla_total_size(headlen);
		headlen = sizeof(*id);
		len += nla_total_size(sizeof(struct xfrm_mark));
	}
	len += NLMSG_ALIGN(headlen);

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
	err = -EMSGSIZE;
	if (nlh == NULL)
		goto out_free_skb;

	p = nlmsg_data(nlh);
	if (c->event == XFRM_MSG_DELSA) {
		struct nlattr *attr;

		id = nlmsg_data(nlh);
		memset(id, 0, sizeof(*id));
		memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
		id->spi = x->id.spi;
		id->family = x->props.family;
		id->proto = x->id.proto;

		/* Redirect p into the nested attribute payload. */
		attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
		err = -EMSGSIZE;
		if (attr == NULL)
			goto out_free_skb;

		p = nla_data(attr);
	}
	err = copy_to_user_state_extra(x, p, skb);
	if (err)
		goto out_free_skb;

	nlmsg_end(skb, nlh);

	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);

out_free_skb:
	kfree_skb(skb);
	return err;
}
3556 
3557 static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c)
3558 {
3559 
3560 	switch (c->event) {
3561 	case XFRM_MSG_EXPIRE:
3562 		return xfrm_exp_state_notify(x, c);
3563 	case XFRM_MSG_NEWAE:
3564 		return xfrm_aevent_state_notify(x, c);
3565 	case XFRM_MSG_DELSA:
3566 	case XFRM_MSG_UPDSA:
3567 	case XFRM_MSG_NEWSA:
3568 		return xfrm_notify_sa(x, c);
3569 	case XFRM_MSG_FLUSHSA:
3570 		return xfrm_notify_sa_flush(c);
3571 	default:
3572 		printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n",
3573 		       c->event);
3574 		break;
3575 	}
3576 
3577 	return 0;
3578 
3579 }
3580 
3581 static inline unsigned int xfrm_acquire_msgsize(struct xfrm_state *x,
3582 						struct xfrm_policy *xp)
3583 {
3584 	return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
3585 	       + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
3586 	       + nla_total_size(sizeof(struct xfrm_mark))
3587 	       + nla_total_size(xfrm_user_sec_ctx_size(x->security))
3588 	       + userpolicy_type_attrsize();
3589 }
3590 
/* Fill an XFRM_MSG_ACQUIRE message asking a key manager to negotiate an
 * SA for template @xt of policy @xp; @x is the larval state.  On any
 * attribute failure the partially built message is cancelled.
 */
static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
			 struct xfrm_tmpl *xt, struct xfrm_policy *xp)
{
	__u32 seq = xfrm_get_acqseq();
	struct xfrm_user_acquire *ua;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ua = nlmsg_data(nlh);
	memcpy(&ua->id, &x->id, sizeof(ua->id));
	memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
	memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
	copy_to_user_policy(xp, &ua->policy, XFRM_POLICY_OUT);
	ua->aalgos = xt->aalgos;
	ua->ealgos = xt->ealgos;
	ua->calgos = xt->calgos;
	/* Remember the acquire sequence so the reply can be matched. */
	ua->seq = x->km.seq = seq;

	err = copy_to_user_tmpl(xp, skb);
	if (!err)
		err = copy_to_user_state_sec_ctx(x, skb);
	if (!err)
		err = copy_to_user_policy_type(xp->type, skb);
	if (!err)
		err = xfrm_mark_put(skb, &xp->mark);
	if (!err)
		err = xfrm_if_id_put(skb, xp->if_id);
	if (!err && xp->xdo.dev)
		err = copy_user_offload(&xp->xdo, skb);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}

	nlmsg_end(skb, nlh);
	return 0;
}
3632 
3633 static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
3634 			     struct xfrm_policy *xp)
3635 {
3636 	struct net *net = xs_net(x);
3637 	struct sk_buff *skb;
3638 	int err;
3639 
3640 	skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
3641 	if (skb == NULL)
3642 		return -ENOMEM;
3643 
3644 	err = build_acquire(skb, x, xt, xp);
3645 	BUG_ON(err < 0);
3646 
3647 	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_ACQUIRE);
3648 }
3649 
/* User gives us xfrm_user_policy_info followed by an array of 0
 * or more templates.
 *
 * km ->compile_policy callback backing the IP_XFRM_POLICY /
 * IPV6_XFRM_POLICY socket options.  On failure, NULL is returned and
 * the negative errno is passed back through *dir; on success *dir
 * carries the policy direction.
 */
static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
					       u8 *data, int len, int *dir)
{
	struct net *net = sock_net(sk);
	struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
	struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
	struct xfrm_policy *xp;
	int nr;

	/* The sockopt must match the socket family. */
	switch (sk->sk_family) {
	case AF_INET:
		if (opt != IP_XFRM_POLICY) {
			*dir = -EOPNOTSUPP;
			return NULL;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		if (opt != IPV6_XFRM_POLICY) {
			*dir = -EOPNOTSUPP;
			return NULL;
		}
		break;
#endif
	default:
		*dir = -EINVAL;
		return NULL;
	}

	*dir = -EINVAL;

	if (len < sizeof(*p) ||
	    verify_newpolicy_info(p, NULL))
		return NULL;

	nr = ((len - sizeof(*p)) / sizeof(*ut));
	if (validate_tmpl(nr, ut, p->sel.family, p->dir, NULL))
		return NULL;

	/* Socket policies may only be IN or OUT, never FWD. */
	if (p->dir > XFRM_POLICY_OUT)
		return NULL;

	xp = xfrm_policy_alloc(net, GFP_ATOMIC);
	if (xp == NULL) {
		*dir = -ENOBUFS;
		return NULL;
	}

	copy_from_user_policy(xp, p);
	xp->type = XFRM_POLICY_TYPE_MAIN;
	copy_templates(xp, ut, nr);

	*dir = p->dir;

	return xp;
}
3709 
3710 static inline unsigned int xfrm_polexpire_msgsize(struct xfrm_policy *xp)
3711 {
3712 	return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
3713 	       + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
3714 	       + nla_total_size(xfrm_user_sec_ctx_size(xp->security))
3715 	       + nla_total_size(sizeof(struct xfrm_mark))
3716 	       + userpolicy_type_attrsize();
3717 }
3718 
/* Fill an XFRM_MSG_POLEXPIRE message for policy @xp into @skb; the
 * hard/soft flag comes from the km_event.  Cancels the message on any
 * attribute failure.
 */
static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
			   int dir, const struct km_event *c)
{
	struct xfrm_user_polexpire *upe;
	int hard = c->data.hard;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	upe = nlmsg_data(nlh);
	copy_to_user_policy(xp, &upe->pol, dir);
	err = copy_to_user_tmpl(xp, skb);
	if (!err)
		err = copy_to_user_sec_ctx(xp, skb);
	if (!err)
		err = copy_to_user_policy_type(xp->type, skb);
	if (!err)
		err = xfrm_mark_put(skb, &xp->mark);
	if (!err)
		err = xfrm_if_id_put(skb, xp->if_id);
	if (!err && xp->xdo.dev)
		err = copy_user_offload(&xp->xdo, skb);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}
	upe->hard = !!hard;

	nlmsg_end(skb, nlh);
	return 0;
}
3753 
3754 static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
3755 {
3756 	struct net *net = xp_net(xp);
3757 	struct sk_buff *skb;
3758 	int err;
3759 
3760 	skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
3761 	if (skb == NULL)
3762 		return -ENOMEM;
3763 
3764 	err = build_polexpire(skb, xp, dir, c);
3765 	BUG_ON(err < 0);
3766 
3767 	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
3768 }
3769 
/* Broadcast NEWPOLICY/UPDPOLICY/DELPOLICY.  For DELPOLICY the header is
 * a compact xfrm_userpolicy_id and the full policy info is nested in an
 * XFRMA_POLICY attribute; for NEW/UPD the full xfrm_userpolicy_info is
 * the header.
 */
static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
	unsigned int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
	struct net *net = xp_net(xp);
	struct xfrm_userpolicy_info *p;
	struct xfrm_userpolicy_id *id;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	unsigned int headlen;
	int err;

	headlen = sizeof(*p);
	if (c->event == XFRM_MSG_DELPOLICY) {
		/* Room for the nested XFRMA_POLICY attribute. */
		len += nla_total_size(headlen);
		headlen = sizeof(*id);
	}
	len += userpolicy_type_attrsize();
	len += nla_total_size(sizeof(struct xfrm_mark));
	len += NLMSG_ALIGN(headlen);

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
	err = -EMSGSIZE;
	if (nlh == NULL)
		goto out_free_skb;

	p = nlmsg_data(nlh);
	if (c->event == XFRM_MSG_DELPOLICY) {
		struct nlattr *attr;

		id = nlmsg_data(nlh);
		memset(id, 0, sizeof(*id));
		id->dir = dir;
		/* Identify the policy either by index or by selector. */
		if (c->data.byid)
			id->index = xp->index;
		else
			memcpy(&id->sel, &xp->selector, sizeof(id->sel));

		/* Redirect p into the nested attribute payload. */
		attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
		err = -EMSGSIZE;
		if (attr == NULL)
			goto out_free_skb;

		p = nla_data(attr);
	}

	copy_to_user_policy(xp, p, dir);
	err = copy_to_user_tmpl(xp, skb);
	if (!err)
		err = copy_to_user_policy_type(xp->type, skb);
	if (!err)
		err = xfrm_mark_put(skb, &xp->mark);
	if (!err)
		err = xfrm_if_id_put(skb, xp->if_id);
	if (!err && xp->xdo.dev)
		err = copy_user_offload(&xp->xdo, skb);
	if (err)
		goto out_free_skb;

	nlmsg_end(skb, nlh);

	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);

out_free_skb:
	kfree_skb(skb);
	return err;
}
3840 
/* Broadcast an XFRM_MSG_FLUSHPOLICY notification; the only payload is
 * the (optional) policy type attribute.
 */
static int xfrm_notify_policy_flush(const struct km_event *c)
{
	struct net *net = c->net;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
	err = -EMSGSIZE;
	if (nlh == NULL)
		goto out_free_skb;
	err = copy_to_user_policy_type(c->data.type, skb);
	if (err)
		goto out_free_skb;

	nlmsg_end(skb, nlh);

	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);

out_free_skb:
	kfree_skb(skb);
	return err;
}
3868 
3869 static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
3870 {
3871 
3872 	switch (c->event) {
3873 	case XFRM_MSG_NEWPOLICY:
3874 	case XFRM_MSG_UPDPOLICY:
3875 	case XFRM_MSG_DELPOLICY:
3876 		return xfrm_notify_policy(xp, dir, c);
3877 	case XFRM_MSG_FLUSHPOLICY:
3878 		return xfrm_notify_policy_flush(c);
3879 	case XFRM_MSG_POLEXPIRE:
3880 		return xfrm_exp_policy_notify(xp, dir, c);
3881 	default:
3882 		printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n",
3883 		       c->event);
3884 	}
3885 
3886 	return 0;
3887 
3888 }
3889 
3890 static inline unsigned int xfrm_report_msgsize(void)
3891 {
3892 	return NLMSG_ALIGN(sizeof(struct xfrm_user_report));
3893 }
3894 
/* Fill an XFRM_MSG_REPORT message; @addr, when non-NULL, is emitted as
 * an XFRMA_COADDR attribute.  Cancels the message on failure.
 */
static int build_report(struct sk_buff *skb, u8 proto,
			struct xfrm_selector *sel, xfrm_address_t *addr)
{
	struct xfrm_user_report *ur;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ur = nlmsg_data(nlh);
	ur->proto = proto;
	memcpy(&ur->sel, sel, sizeof(ur->sel));

	if (addr) {
		int err = nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr);
		if (err) {
			nlmsg_cancel(skb, nlh);
			return err;
		}
	}
	nlmsg_end(skb, nlh);
	return 0;
}
3919 
3920 static int xfrm_send_report(struct net *net, u8 proto,
3921 			    struct xfrm_selector *sel, xfrm_address_t *addr)
3922 {
3923 	struct sk_buff *skb;
3924 	int err;
3925 
3926 	skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);
3927 	if (skb == NULL)
3928 		return -ENOMEM;
3929 
3930 	err = build_report(skb, proto, sel, addr);
3931 	BUG_ON(err < 0);
3932 
3933 	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_REPORT);
3934 }
3935 
/* Payload of an XFRM_MSG_MAPPING notification (header only, no
 * attributes are emitted by build_mapping()).
 */
static inline unsigned int xfrm_mapping_msgsize(void)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_mapping));
}
3940 
/* Fill an XFRM_MSG_MAPPING message announcing a NAT-T address/port
 * change for state @x: old saddr/sport come from the state, the new
 * ones from the caller.  Caller guarantees x->encap is non-NULL.
 */
static int build_mapping(struct sk_buff *skb, struct xfrm_state *x,
			 xfrm_address_t *new_saddr, __be16 new_sport)
{
	struct xfrm_user_mapping *um;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MAPPING, sizeof(*um), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	um = nlmsg_data(nlh);

	memcpy(&um->id.daddr, &x->id.daddr, sizeof(um->id.daddr));
	um->id.spi = x->id.spi;
	um->id.family = x->props.family;
	um->id.proto = x->id.proto;
	memcpy(&um->new_saddr, new_saddr, sizeof(um->new_saddr));
	memcpy(&um->old_saddr, &x->props.saddr, sizeof(um->old_saddr));
	um->new_sport = new_sport;
	um->old_sport = x->encap->encap_sport;
	um->reqid = x->props.reqid;

	nlmsg_end(skb, nlh);
	return 0;
}
3966 
3967 static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
3968 			     __be16 sport)
3969 {
3970 	struct net *net = xs_net(x);
3971 	struct sk_buff *skb;
3972 	int err;
3973 
3974 	if (x->id.proto != IPPROTO_ESP)
3975 		return -EINVAL;
3976 
3977 	if (!x->encap)
3978 		return -EINVAL;
3979 
3980 	skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC);
3981 	if (skb == NULL)
3982 		return -ENOMEM;
3983 
3984 	err = build_mapping(skb, x, ipaddr, sport);
3985 	BUG_ON(err < 0);
3986 
3987 	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MAPPING);
3988 }
3989 
3990 static bool xfrm_is_alive(const struct km_event *c)
3991 {
3992 	return (bool)xfrm_acquire_is_on(c->net);
3993 }
3994 
/* Key-manager callbacks registered with the xfrm core; these generate
 * the netlink notifications and handle socket-policy compilation.
 */
static struct xfrm_mgr netlink_mgr = {
	.notify		= xfrm_send_state_notify,
	.acquire	= xfrm_send_acquire,
	.compile_policy	= xfrm_compile_policy,
	.notify_policy	= xfrm_send_policy_notify,
	.report		= xfrm_send_report,
	.migrate	= xfrm_send_migrate,
	.new_mapping	= xfrm_send_mapping,
	.is_alive	= xfrm_is_alive,
};
4005 
4006 static int __net_init xfrm_user_net_init(struct net *net)
4007 {
4008 	struct sock *nlsk;
4009 	struct netlink_kernel_cfg cfg = {
4010 		.groups	= XFRMNLGRP_MAX,
4011 		.input	= xfrm_netlink_rcv,
4012 	};
4013 
4014 	nlsk = netlink_kernel_create(net, NETLINK_XFRM, &cfg);
4015 	if (nlsk == NULL)
4016 		return -ENOMEM;
4017 	net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
4018 	rcu_assign_pointer(net->xfrm.nlsk, nlsk);
4019 	return 0;
4020 }
4021 
/* Per-netns pre-exit: hide the socket from new senders; the stashed
 * pointer is released later in xfrm_user_net_exit().
 */
static void __net_exit xfrm_user_net_pre_exit(struct net *net)
{
	RCU_INIT_POINTER(net->xfrm.nlsk, NULL);
}
4026 
/* Batched per-netns teardown: release each stashed kernel socket. */
static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
{
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list)
		netlink_kernel_release(net->xfrm.nlsk_stash);
}
4034 
/* Per-network-namespace lifecycle hooks for the xfrm netlink socket. */
static struct pernet_operations xfrm_user_net_ops = {
	.init	    = xfrm_user_net_init,
	.pre_exit   = xfrm_user_net_pre_exit,
	.exit_batch = xfrm_user_net_exit,
};
4040 
4041 static int __init xfrm_user_init(void)
4042 {
4043 	int rv;
4044 
4045 	printk(KERN_INFO "Initializing XFRM netlink socket\n");
4046 
4047 	rv = register_pernet_subsys(&xfrm_user_net_ops);
4048 	if (rv < 0)
4049 		return rv;
4050 	xfrm_register_km(&netlink_mgr);
4051 	return 0;
4052 }
4053 
/* Module exit: tear down in reverse order of xfrm_user_init(). */
static void __exit xfrm_user_exit(void)
{
	xfrm_unregister_km(&netlink_mgr);
	unregister_pernet_subsys(&xfrm_user_net_ops);
}
4059 
4060 module_init(xfrm_user_init);
4061 module_exit(xfrm_user_exit);
4062 MODULE_DESCRIPTION("XFRM User interface");
4063 MODULE_LICENSE("GPL");
4064 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);
4065