/* xfrm_user.c: User interface to configure xfrm engine.
 *
 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
 *
 * Changes:
 *	Mitsuru KANDA @USAGI
 *	Kazunori MIYAZAWA @USAGI
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *		IPv6 support
 *
 */

#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/init.h>
#include <linux/security.h>
#include <net/sock.h>
#include <net/xfrm.h>
#include <net/netlink.h>
#include <net/ah.h>
#include <asm/uaccess.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/in6.h>
#endif

static inline int aead_len(struct xfrm_algo_aead *alg)
{
	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
}

static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
{
	struct nlattr *rt = attrs[type];
	struct xfrm_algo *algp;

	if (!rt)
		return 0;

	algp = nla_data(rt);
	if (nla_len(rt) < xfrm_alg_len(algp))
		return -EINVAL;

	switch (type) {
	case XFRMA_ALG_AUTH:
	case XFRMA_ALG_CRYPT:
	case XFRMA_ALG_COMP:
		break;

	default:
		return -EINVAL;
	}

	algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
	return 0;
}

static int verify_auth_trunc(struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC];
	struct xfrm_algo_auth *algp;

	if (!rt)
		return 0;

	algp = nla_data(rt);
	if (nla_len(rt) < xfrm_alg_auth_len(algp))
		return -EINVAL;

	algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
	return 0;
}

static int verify_aead(struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_ALG_AEAD];
	struct xfrm_algo_aead *algp;

	if (!rt)
		return 0;

	algp = nla_data(rt);
	if (nla_len(rt) < aead_len(algp))
		return -EINVAL;

	algp->alg_name[CRYPTO_MAX_ALG_NAME - 1] = '\0';
	return 0;
}

static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type,
			    xfrm_address_t **addrp)
{
	struct nlattr *rt = attrs[type];

	if (rt && addrp)
		*addrp = nla_data(rt);
}

static inline int verify_sec_ctx_len(struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_SEC_CTX];
	struct xfrm_user_sec_ctx *uctx;

	if (!rt)
		return 0;

	uctx = nla_data(rt);
	if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
		return -EINVAL;

	return 0;
}

static inline int verify_replay(struct xfrm_usersa_info *p,
				struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];

	if ((p->flags & XFRM_STATE_ESN) && !rt)
		return -EINVAL;

	if (!rt)
		return 0;

	if (p->id.proto != IPPROTO_ESP)
		return -EINVAL;

	if (p->replay_window != 0)
		return -EINVAL;

	return 0;
}

static int verify_newsa_info(struct xfrm_usersa_info *p,
			     struct nlattr **attrs)
{
	int err;

	err = -EINVAL;
	switch (p->family) {
	case AF_INET:
		break;

	case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
		break;
#else
		err = -EAFNOSUPPORT;
		goto out;
#endif

	default:
		goto out;
	}

	err = -EINVAL;
	switch (p->id.proto) {
	case IPPROTO_AH:
		if ((!attrs[XFRMA_ALG_AUTH] &&
		     !attrs[XFRMA_ALG_AUTH_TRUNC]) ||
		    attrs[XFRMA_ALG_AEAD] ||
		    attrs[XFRMA_ALG_CRYPT] ||
		    attrs[XFRMA_ALG_COMP] ||
		    attrs[XFRMA_TFCPAD])
			goto out;
		break;

	case IPPROTO_ESP:
		if (attrs[XFRMA_ALG_COMP])
			goto out;
		if (!attrs[XFRMA_ALG_AUTH] &&
		    !attrs[XFRMA_ALG_AUTH_TRUNC] &&
		    !attrs[XFRMA_ALG_CRYPT] &&
		    !attrs[XFRMA_ALG_AEAD])
			goto out;
		if ((attrs[XFRMA_ALG_AUTH] ||
		     attrs[XFRMA_ALG_AUTH_TRUNC] ||
		     attrs[XFRMA_ALG_CRYPT]) &&
		    attrs[XFRMA_ALG_AEAD])
			goto out;
		if (attrs[XFRMA_TFCPAD] &&
		    p->mode != XFRM_MODE_TUNNEL)
			goto out;
		break;

	case IPPROTO_COMP:
		if (!attrs[XFRMA_ALG_COMP] ||
		    attrs[XFRMA_ALG_AEAD] ||
		    attrs[XFRMA_ALG_AUTH] ||
		    attrs[XFRMA_ALG_AUTH_TRUNC] ||
		    attrs[XFRMA_ALG_CRYPT] ||
		    attrs[XFRMA_TFCPAD])
			goto out;
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case IPPROTO_DSTOPTS:
	case IPPROTO_ROUTING:
		if (attrs[XFRMA_ALG_COMP] ||
		    attrs[XFRMA_ALG_AUTH] ||
		    attrs[XFRMA_ALG_AUTH_TRUNC] ||
		    attrs[XFRMA_ALG_AEAD] ||
		    attrs[XFRMA_ALG_CRYPT] ||
		    attrs[XFRMA_ENCAP] ||
		    attrs[XFRMA_SEC_CTX] ||
		    attrs[XFRMA_TFCPAD] ||
		    !attrs[XFRMA_COADDR])
			goto out;
		break;
#endif

	default:
		goto out;
	}

	if ((err = verify_aead(attrs)))
		goto out;
	if ((err = verify_auth_trunc(attrs)))
		goto out;
	if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH)))
		goto out;
	if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT)))
		goto out;
	if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP)))
		goto out;
	if ((err = verify_sec_ctx_len(attrs)))
		goto out;
	if ((err = verify_replay(p, attrs)))
		goto out;

	err = -EINVAL;
	switch (p->mode) {
	case XFRM_MODE_TRANSPORT:
	case XFRM_MODE_TUNNEL:
	case XFRM_MODE_ROUTEOPTIMIZATION:
	case XFRM_MODE_BEET:
		break;

	default:
		goto out;
	}

	err = 0;

out:
	return err;
}

static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
			   struct xfrm_algo_desc *(*get_byname)(const char *, int),
			   struct nlattr *rta)
{
	struct xfrm_algo *p, *ualg;
	struct xfrm_algo_desc *algo;

	if (!rta)
		return 0;

	ualg = nla_data(rta);

	algo = get_byname(ualg->alg_name, 1);
	if (!algo)
		return -ENOSYS;
	*props = algo->desc.sadb_alg_id;

	p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	strcpy(p->alg_name, algo->name);
	*algpp = p;
	return 0;
}

static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props,
		       struct nlattr *rta)
{
	struct xfrm_algo *ualg;
	struct xfrm_algo_auth *p;
	struct xfrm_algo_desc *algo;

	if (!rta)
		return 0;

	ualg = nla_data(rta);

	algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
	if (!algo)
		return -ENOSYS;
	*props = algo->desc.sadb_alg_id;

	p = kmalloc(sizeof(*p) + (ualg->alg_key_len + 7) / 8, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	strcpy(p->alg_name, algo->name);
	p->alg_key_len = ualg->alg_key_len;
	p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
	memcpy(p->alg_key, ualg->alg_key, (ualg->alg_key_len + 7) / 8);

	*algpp = p;
	return 0;
}

static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
			     struct nlattr *rta)
{
	struct xfrm_algo_auth *p, *ualg;
	struct xfrm_algo_desc *algo;

	if (!rta)
		return 0;

	ualg = nla_data(rta);

	algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
	if (!algo)
		return -ENOSYS;
	if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN ||
	    ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
		return -EINVAL;
	*props = algo->desc.sadb_alg_id;

	p = kmemdup(ualg, xfrm_alg_auth_len(ualg), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	strcpy(p->alg_name, algo->name);
	if (!p->alg_trunc_len)
		p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;

	*algpp = p;
	return 0;
}

static int attach_aead(struct xfrm_algo_aead **algpp, u8 *props,
		       struct nlattr *rta)
{
	struct xfrm_algo_aead *p, *ualg;
	struct xfrm_algo_desc *algo;

	if (!rta)
		return 0;

	ualg = nla_data(rta);

	algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1);
	if (!algo)
		return -ENOSYS;
	*props = algo->desc.sadb_alg_id;

	p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	strcpy(p->alg_name, algo->name);
	*algpp = p;
	return 0;
}

static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn,
					 struct nlattr *rp)
{
	struct xfrm_replay_state_esn *up;

	if (!replay_esn || !rp)
		return 0;

	up = nla_data(rp);

	if (xfrm_replay_state_esn_len(replay_esn) !=
	    xfrm_replay_state_esn_len(up))
		return -EINVAL;

	return 0;
}

static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn,
				       struct xfrm_replay_state_esn **preplay_esn,
				       struct nlattr *rta)
{
	struct xfrm_replay_state_esn *p, *pp, *up;

	if (!rta)
		return 0;

	up = nla_data(rta);

	p = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	pp = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
	if (!pp) {
		kfree(p);
		return -ENOMEM;
	}

	*replay_esn = p;
	*preplay_esn = pp;

	return 0;
}

static inline int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
{
	int len = 0;

	if (xfrm_ctx) {
		len += sizeof(struct xfrm_user_sec_ctx);
		len += xfrm_ctx->ctx_len;
	}
	return len;
}

static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
	memcpy(&x->id, &p->id, sizeof(x->id));
	memcpy(&x->sel, &p->sel, sizeof(x->sel));
	memcpy(&x->lft, &p->lft, sizeof(x->lft));
	x->props.mode = p->mode;
	x->props.replay_window = p->replay_window;
	x->props.reqid = p->reqid;
	x->props.family = p->family;
	memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
	x->props.flags = p->flags;

	if (!x->sel.family && !(p->flags & XFRM_STATE_AF_UNSPEC))
		x->sel.family = p->family;
}

/*
 * someday when pfkey also has support, we could have the code
 * somehow made shareable and move it to xfrm_state.c - JHS
 *
 */
static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs)
{
	struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
	struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
	struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
	struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
	struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];

	if (re) {
		struct xfrm_replay_state_esn *replay_esn;
		replay_esn = nla_data(re);
		memcpy(x->replay_esn, replay_esn,
		       xfrm_replay_state_esn_len(replay_esn));
		memcpy(x->preplay_esn, replay_esn,
		       xfrm_replay_state_esn_len(replay_esn));
	}

	if (rp) {
		struct xfrm_replay_state *replay;
		replay = nla_data(rp);
		memcpy(&x->replay, replay, sizeof(*replay));
		memcpy(&x->preplay, replay, sizeof(*replay));
	}

	if (lt) {
		struct xfrm_lifetime_cur *ltime;
		ltime = nla_data(lt);
		x->curlft.bytes = ltime->bytes;
		x->curlft.packets = ltime->packets;
		x->curlft.add_time = ltime->add_time;
		x->curlft.use_time = ltime->use_time;
	}

	if (et)
		x->replay_maxage = nla_get_u32(et);

	if (rt)
		x->replay_maxdiff = nla_get_u32(rt);
}

static struct xfrm_state *xfrm_state_construct(struct net *net,
					       struct xfrm_usersa_info *p,
					       struct nlattr **attrs,
					       int *errp)
{
	struct xfrm_state *x = xfrm_state_alloc(net);
	int err = -ENOMEM;

	if (!x)
		goto error_no_put;

	copy_from_user_state(x, p);

	if ((err = attach_aead(&x->aead, &x->props.ealgo,
			       attrs[XFRMA_ALG_AEAD])))
		goto error;
	if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo,
				     attrs[XFRMA_ALG_AUTH_TRUNC])))
		goto error;
	if (!x->props.aalgo) {
		if ((err = attach_auth(&x->aalg, &x->props.aalgo,
				       attrs[XFRMA_ALG_AUTH])))
			goto error;
	}
	if ((err = attach_one_algo(&x->ealg, &x->props.ealgo,
				   xfrm_ealg_get_byname,
				   attrs[XFRMA_ALG_CRYPT])))
		goto error;
	if ((err = attach_one_algo(&x->calg, &x->props.calgo,
				   xfrm_calg_get_byname,
				   attrs[XFRMA_ALG_COMP])))
		goto error;

	if (attrs[XFRMA_ENCAP]) {
		x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
				   sizeof(*x->encap), GFP_KERNEL);
		if (x->encap == NULL)
			goto error;
	}

	if (attrs[XFRMA_TFCPAD])
		x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]);

	if (attrs[XFRMA_COADDR]) {
		x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
				    sizeof(*x->coaddr), GFP_KERNEL);
		if (x->coaddr == NULL)
			goto error;
	}

	xfrm_mark_get(attrs, &x->mark);

	err = __xfrm_init_state(x, false);
	if (err)
		goto error;

	if (attrs[XFRMA_SEC_CTX] &&
	    security_xfrm_state_alloc(x, nla_data(attrs[XFRMA_SEC_CTX])))
		goto error;

	if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
					       attrs[XFRMA_REPLAY_ESN_VAL])))
		goto error;

	x->km.seq = p->seq;
	x->replay_maxdiff = net->xfrm.sysctl_aevent_rseqth;
	/* sysctl_xfrm_aevent_etime is in 100ms units */
	x->replay_maxage = (net->xfrm.sysctl_aevent_etime*HZ)/XFRM_AE_ETH_M;

	if ((err = xfrm_init_replay(x)))
		goto error;

	/* override default values from above */
	xfrm_update_ae_params(x, attrs);

	return x;

error:
	x->km.state = XFRM_STATE_DEAD;
	xfrm_state_put(x);
error_no_put:
	*errp = err;
	return NULL;
}

static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_usersa_info *p = nlmsg_data(nlh);
	struct xfrm_state *x;
	int err;
	struct km_event c;
	uid_t loginuid = audit_get_loginuid(current);
	u32 sessionid = audit_get_sessionid(current);
	u32 sid;

	err = verify_newsa_info(p, attrs);
	if (err)
		return err;

	x = xfrm_state_construct(net, p, attrs, &err);
	if (!x)
		return err;

	xfrm_state_hold(x);
	if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
		err = xfrm_state_add(x);
	else
		err = xfrm_state_update(x);

	security_task_getsecid(current, &sid);
	xfrm_audit_state_add(x, err ? 0 : 1, loginuid, sessionid, sid);

	if (err < 0) {
		x->km.state = XFRM_STATE_DEAD;
		__xfrm_state_put(x);
		goto out;
	}

	c.seq = nlh->nlmsg_seq;
	c.pid = nlh->nlmsg_pid;
	c.event = nlh->nlmsg_type;

	km_state_notify(x, &c);
out:
	xfrm_state_put(x);
	return err;
}

static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
						 struct xfrm_usersa_id *p,
						 struct nlattr **attrs,
						 int *errp)
{
	struct xfrm_state *x = NULL;
	struct xfrm_mark m;
	int err;
	u32 mark = xfrm_mark_get(attrs, &m);

	if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
		err = -ESRCH;
		x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family);
	} else {
		xfrm_address_t *saddr = NULL;

		verify_one_addr(attrs, XFRMA_SRCADDR, &saddr);
		if (!saddr) {
			err = -EINVAL;
			goto out;
		}

		err = -ESRCH;
		x = xfrm_state_lookup_byaddr(net, mark,
					     &p->daddr, saddr,
					     p->proto, p->family);
	}

out:
	if (!x && errp)
		*errp = err;
	return x;
}

static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	int err = -ESRCH;
	struct km_event c;
	struct xfrm_usersa_id *p = nlmsg_data(nlh);
	uid_t loginuid = audit_get_loginuid(current);
	u32 sessionid = audit_get_sessionid(current);
	u32 sid;

	x = xfrm_user_state_lookup(net, p, attrs, &err);
	if (x == NULL)
		return err;

	if ((err = security_xfrm_state_delete(x)) != 0)
		goto out;

	if (xfrm_state_kern(x)) {
		err = -EPERM;
		goto out;
	}

	err = xfrm_state_delete(x);

	if (err < 0)
		goto out;

	c.seq = nlh->nlmsg_seq;
	c.pid = nlh->nlmsg_pid;
	c.event = nlh->nlmsg_type;
	km_state_notify(x, &c);

out:
	security_task_getsecid(current, &sid);
	xfrm_audit_state_delete(x, err ? 0 : 1, loginuid, sessionid, sid);
	xfrm_state_put(x);
	return err;
}

static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
	memcpy(&p->id, &x->id, sizeof(p->id));
	memcpy(&p->sel, &x->sel, sizeof(p->sel));
	memcpy(&p->lft, &x->lft, sizeof(p->lft));
	memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
	memcpy(&p->stats, &x->stats, sizeof(p->stats));
	memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
	p->mode = x->props.mode;
	p->replay_window = x->props.replay_window;
	p->reqid = x->props.reqid;
	p->family = x->props.family;
	p->flags = x->props.flags;
	p->seq = x->km.seq;
}

struct xfrm_dump_info {
	struct sk_buff *in_skb;
	struct sk_buff *out_skb;
	u32 nlmsg_seq;
	u16 nlmsg_flags;
};

static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
{
	struct xfrm_user_sec_ctx *uctx;
	struct nlattr *attr;
	int ctx_size = sizeof(*uctx) + s->ctx_len;

	attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size);
	if (attr == NULL)
		return -EMSGSIZE;

	uctx = nla_data(attr);
	uctx->exttype = XFRMA_SEC_CTX;
	uctx->len = ctx_size;
	uctx->ctx_doi = s->ctx_doi;
	uctx->ctx_alg = s->ctx_alg;
	uctx->ctx_len = s->ctx_len;
	memcpy(uctx + 1, s->ctx_str, s->ctx_len);

	return 0;
}

static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
{
	struct xfrm_algo *algo;
	struct nlattr *nla;

	nla = nla_reserve(skb, XFRMA_ALG_AUTH,
			  sizeof(*algo) + (auth->alg_key_len + 7) / 8);
	if (!nla)
		return -EMSGSIZE;

	algo = nla_data(nla);
	strcpy(algo->alg_name, auth->alg_name);
	memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
	algo->alg_key_len = auth->alg_key_len;

	return 0;
}

/* Don't change this without updating xfrm_sa_len! */
static int copy_to_user_state_extra(struct xfrm_state *x,
				    struct xfrm_usersa_info *p,
				    struct sk_buff *skb)
{
	copy_to_user_state(x, p);

	if (x->coaddr)
		NLA_PUT(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);

	if (x->lastused)
		NLA_PUT_U64(skb, XFRMA_LASTUSED, x->lastused);

	if (x->aead)
		NLA_PUT(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
	if (x->aalg) {
		if (copy_to_user_auth(x->aalg, skb))
			goto nla_put_failure;

		NLA_PUT(skb, XFRMA_ALG_AUTH_TRUNC,
			xfrm_alg_auth_len(x->aalg), x->aalg);
	}
	if (x->ealg)
		NLA_PUT(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg);
	if (x->calg)
		NLA_PUT(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);

	if (x->encap)
		NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);

	if (x->tfcpad)
		NLA_PUT_U32(skb, XFRMA_TFCPAD, x->tfcpad);

	if (xfrm_mark_put(skb, &x->mark))
		goto nla_put_failure;

	if (x->replay_esn)
		NLA_PUT(skb, XFRMA_REPLAY_ESN_VAL,
			xfrm_replay_state_esn_len(x->replay_esn), x->replay_esn);

	if (x->security && copy_sec_ctx(x->security, skb) < 0)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
{
	struct xfrm_dump_info *sp = ptr;
	struct sk_buff *in_skb = sp->in_skb;
	struct sk_buff *skb = sp->out_skb;
	struct xfrm_usersa_info *p;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
			XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	p = nlmsg_data(nlh);

	err = copy_to_user_state_extra(x, p, skb);
	if (err)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return err;
}

static int xfrm_dump_sa_done(struct netlink_callback *cb)
{
	struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
	xfrm_state_walk_done(walk);
	return 0;
}

static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
	struct xfrm_dump_info info;

	BUILD_BUG_ON(sizeof(struct xfrm_state_walk) >
		     sizeof(cb->args) - sizeof(cb->args[0]));

	info.in_skb = cb->skb;
	info.out_skb = skb;
	info.nlmsg_seq = cb->nlh->nlmsg_seq;
	info.nlmsg_flags = NLM_F_MULTI;

	if (!cb->args[0]) {
		cb->args[0] = 1;
		xfrm_state_walk_init(walk, 0);
	}

	(void) xfrm_state_walk(net, walk, dump_one_state, &info);

	return skb->len;
}

static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
					  struct xfrm_state *x, u32 seq)
{
	struct xfrm_dump_info info;
	struct sk_buff *skb;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	info.in_skb = in_skb;
	info.out_skb = skb;
	info.nlmsg_seq = seq;
	info.nlmsg_flags = 0;

	if (dump_one_state(x, 0, &info)) {
		kfree_skb(skb);
		return NULL;
	}

	return skb;
}

static inline size_t xfrm_spdinfo_msgsize(void)
{
	return NLMSG_ALIGN(4)
	       + nla_total_size(sizeof(struct xfrmu_spdinfo))
	       + nla_total_size(sizeof(struct xfrmu_spdhinfo));
}

static int build_spdinfo(struct sk_buff *skb, struct net *net,
			 u32 pid, u32 seq, u32 flags)
{
	struct xfrmk_spdinfo si;
	struct xfrmu_spdinfo spc;
	struct xfrmu_spdhinfo sph;
	struct nlmsghdr *nlh;
	u32 *f;

	nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
	if (nlh == NULL) /* shouldn't really happen ... */
		return -EMSGSIZE;

	f = nlmsg_data(nlh);
	*f = flags;
	xfrm_spd_getinfo(net, &si);
	spc.incnt = si.incnt;
	spc.outcnt = si.outcnt;
	spc.fwdcnt = si.fwdcnt;
	spc.inscnt = si.inscnt;
	spc.outscnt = si.outscnt;
	spc.fwdscnt = si.fwdscnt;
	sph.spdhcnt = si.spdhcnt;
	sph.spdhmcnt = si.spdhmcnt;

	NLA_PUT(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
	NLA_PUT(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct sk_buff *r_skb;
	u32 *flags = nlmsg_data(nlh);
	u32 spid = NETLINK_CB(skb).pid;
	u32 seq = nlh->nlmsg_seq;

	r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
	if (r_skb == NULL)
		return -ENOMEM;

	if (build_spdinfo(r_skb, net, spid, seq, *flags) < 0)
		BUG();

	return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
}

static inline size_t xfrm_sadinfo_msgsize(void)
{
	return NLMSG_ALIGN(4)
	       + nla_total_size(sizeof(struct xfrmu_sadhinfo))
	       + nla_total_size(4); /* XFRMA_SAD_CNT */
}

static int build_sadinfo(struct sk_buff *skb, struct net *net,
			 u32 pid, u32 seq, u32 flags)
{
	struct xfrmk_sadinfo si;
	struct xfrmu_sadhinfo sh;
	struct nlmsghdr *nlh;
	u32 *f;

	nlh = nlmsg_put(skb, pid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
	if (nlh == NULL) /* shouldn't really happen ... */
		return -EMSGSIZE;

	f = nlmsg_data(nlh);
	*f = flags;
	xfrm_sad_getinfo(net, &si);

	sh.sadhmcnt = si.sadhmcnt;
	sh.sadhcnt = si.sadhcnt;

	NLA_PUT_U32(skb, XFRMA_SAD_CNT, si.sadcnt);
	NLA_PUT(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct sk_buff *r_skb;
	u32 *flags = nlmsg_data(nlh);
	u32 spid = NETLINK_CB(skb).pid;
	u32 seq = nlh->nlmsg_seq;

	r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
	if (r_skb == NULL)
		return -ENOMEM;

	if (build_sadinfo(r_skb, net, spid, seq, *flags) < 0)
		BUG();

	return nlmsg_unicast(net->xfrm.nlsk, r_skb, spid);
}

static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_usersa_id *p = nlmsg_data(nlh);
	struct xfrm_state *x;
	struct sk_buff *resp_skb;
	int err = -ESRCH;

	x = xfrm_user_state_lookup(net, p, attrs, &err);
	if (x == NULL)
		goto out_noput;

	resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
	if (IS_ERR(resp_skb)) {
		err = PTR_ERR(resp_skb);
	} else {
		err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid);
	}
	xfrm_state_put(x);
out_noput:
	return err;
}

static int verify_userspi_info(struct xfrm_userspi_info *p)
{
	switch (p->info.id.proto) {
	case IPPROTO_AH:
	case IPPROTO_ESP:
		break;

	case IPPROTO_COMP:
		/* IPCOMP spi is 16-bits. */
		if (p->max >= 0x10000)
			return -EINVAL;
		break;

	default:
		return -EINVAL;
	}

	if (p->min > p->max)
		return -EINVAL;

	return 0;
}

static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct xfrm_userspi_info *p;
	struct sk_buff *resp_skb;
	xfrm_address_t *daddr;
	int family;
	int err;
	u32 mark;
	struct xfrm_mark m;

	p = nlmsg_data(nlh);
	err = verify_userspi_info(p);
	if (err)
		goto out_noput;

	family = p->info.family;
	daddr = &p->info.id.daddr;

	x = NULL;

	mark = xfrm_mark_get(attrs, &m);
	if (p->info.seq) {
		x = xfrm_find_acq_byseq(net, mark, p->info.seq);
		if (x && xfrm_addr_cmp(&x->id.daddr, daddr, family)) {
			xfrm_state_put(x);
			x = NULL;
		}
	}

	if (!x)
		x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
				  p->info.id.proto, daddr,
				  &p->info.saddr, 1,
				  family);
	err = -ENOENT;
	if (x == NULL)
		goto out_noput;

	err = xfrm_alloc_spi(x, p->min, p->max);
	if (err)
		goto out;

	resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
	if (IS_ERR(resp_skb)) {
		err = PTR_ERR(resp_skb);
		goto out;
	}

	err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).pid);

out:
	xfrm_state_put(x);
out_noput:
	return err;
}

static int verify_policy_dir(u8 dir)
{
	switch (dir) {
	case XFRM_POLICY_IN:
	case XFRM_POLICY_OUT:
	case XFRM_POLICY_FWD:
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int verify_policy_type(u8 type)
{
	switch (type) {
	case XFRM_POLICY_TYPE_MAIN:
#ifdef CONFIG_XFRM_SUB_POLICY
	case XFRM_POLICY_TYPE_SUB:
#endif
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
{
	switch (p->share) {
	case XFRM_SHARE_ANY:
	case XFRM_SHARE_SESSION:
	case XFRM_SHARE_USER:
	case XFRM_SHARE_UNIQUE:
		break;

	default:
		return -EINVAL;
	}

	switch (p->action) {
	case XFRM_POLICY_ALLOW:
	case XFRM_POLICY_BLOCK:
		break;

	default:
		return -EINVAL;
	}

	switch (p->sel.family) {
	case AF_INET:
		break;

	case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
		break;
#else
		return -EAFNOSUPPORT;
#endif

	default:
		return -EINVAL;
	}

	return verify_policy_dir(p->dir);
}

static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_SEC_CTX];
	struct xfrm_user_sec_ctx *uctx;

	if (!rt)
		return 0;

	uctx = nla_data(rt);
	return security_xfrm_policy_alloc(&pol->security, uctx);
}

static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
			   int nr)
{
	int i;

	xp->xfrm_nr = nr;
	for (i = 0; i < nr; i++, ut++) {
		struct xfrm_tmpl *t = &xp->xfrm_vec[i];

		memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
		memcpy(&t->saddr, &ut->saddr,
		       sizeof(xfrm_address_t));
		t->reqid = ut->reqid;
		t->mode = ut->mode;
		t->share = ut->share;
		t->optional = ut->optional;
		t->aalgos = ut->aalgos;
		t->ealgos = ut->ealgos;
		t->calgos = ut->calgos;
		/* If all masks are ~0, then we allow all algorithms. */
		t->allalgs = !~(t->aalgos & t->ealgos & t->calgos);
		t->encap_family = ut->family;
	}
}

static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
{
	int i;

	if (nr > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < nr; i++) {
		/* We never validated the ut->family value, so many
		 * applications simply leave it at zero. The check was
		 * never made and ut->family was ignored because all
		 * templates could be assumed to have the same family as
		 * the policy itself. Now that we will have ipv4-in-ipv6
		 * and ipv6-in-ipv4 tunnels, this is no longer true.
		 */
		if (!ut[i].family)
			ut[i].family = family;

		switch (ut[i].family) {
		case AF_INET:
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case AF_INET6:
			break;
#endif
		default:
			return -EINVAL;
		}
	}

	return 0;
}

static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_TMPL];

	if (!rt) {
		pol->xfrm_nr = 0;
	} else {
		struct xfrm_user_tmpl *utmpl = nla_data(rt);
		int nr = nla_len(rt) / sizeof(*utmpl);
		int err;

		err = validate_tmpl(nr, utmpl, pol->family);
		if (err)
			return err;

		copy_templates(pol, utmpl, nr);
	}
	return 0;
}

static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_POLICY_TYPE];
	struct xfrm_userpolicy_type *upt;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;

	if (rt) {
		upt = nla_data(rt);
		type = upt->type;
	}

	err = verify_policy_type(type);
	if (err)
		return err;

	*tp = type;
	return 0;
}

static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
{
	xp->priority = p->priority;
	xp->index = p->index;
	memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
	memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
	xp->action = p->action;
	xp->flags = p->flags;
	xp->family = p->sel.family;
	/* XXX xp->share = p->share; */
}

static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
{
	memcpy(&p->sel, &xp->selector, sizeof(p->sel));
	memcpy(&p->lft, &xp->lft, sizeof(p->lft));
	memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
	p->priority = xp->priority;
	p->index = xp->index;
	p->sel.family = xp->family;
	p->dir = dir;
	p->action = xp->action;
	p->flags = xp->flags;
	p->share = XFRM_SHARE_ANY; /* XXX xp->share */
}

static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_userpolicy_info *p, struct nlattr **attrs, int *errp)
{
	struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL);
	int err;

	if (!xp) {
		*errp = -ENOMEM;
		return NULL;
	}

	copy_from_user_policy(xp, p);

	err = copy_from_user_policy_type(&xp->type, attrs);
	if (err)
		goto error;

	if (!(err = copy_from_user_tmpl(xp, attrs)))
		err = copy_from_user_sec_ctx(xp, attrs);
	if (err)
		goto error;

	xfrm_mark_get(attrs, &xp->mark);

	return xp;
error:
	*errp = err;
	xp->walk.dead = 1;
	xfrm_policy_destroy(xp);
	return NULL;
}

static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
	struct xfrm_policy *xp;
	struct km_event c;
	int err;
	int excl;
	uid_t loginuid = audit_get_loginuid(current);
	u32 sessionid = audit_get_sessionid(current);
	u32 sid;

	err = verify_newpolicy_info(p);
	if (err)
		return err;
	err = verify_sec_ctx_len(attrs);
	if (err)
		return err;

	xp = xfrm_policy_construct(net, p, attrs, &err);
	if (!xp)
		return err;

	/* shouldn't excl be based on nlh flags??
	 * Aha! this is anti-netlink really i.e more pfkey derived
	 * in netlink excl is a flag and you wouldnt need
	 * a type XFRM_MSG_UPDPOLICY - JHS */
	excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
	err = xfrm_policy_insert(p->dir, xp, excl);
	security_task_getsecid(current, &sid);
	xfrm_audit_policy_add(xp, err ? 0 : 1, loginuid, sessionid, sid);

	if (err) {
		security_xfrm_policy_free(xp->security);
		kfree(xp);
		return err;
	}

	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.pid = nlh->nlmsg_pid;
	km_policy_notify(xp, p->dir, &c);

	xfrm_pol_put(xp);

	return 0;
}

static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
{
	struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
	int i;

	if (xp->xfrm_nr == 0)
		return 0;

	for (i = 0; i < xp->xfrm_nr; i++) {
		struct xfrm_user_tmpl *up = &vec[i];
		struct xfrm_tmpl *kp = &xp->xfrm_vec[i];

		memcpy(&up->id, &kp->id, sizeof(up->id));
		up->family = kp->encap_family;
		memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
		up->reqid = kp->reqid;
		up->mode = kp->mode;
		up->share = kp->share;
		up->optional = kp->optional;
		up->aalgos = kp->aalgos;
		up->ealgos = kp->ealgos;
		up->calgos = kp->calgos;
	}

	return nla_put(skb, XFRMA_TMPL,
		       sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec);
}

static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
{
	if (x->security) {
		return copy_sec_ctx(x->security, skb);
	}
	return 0;
}

static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
{
	if (xp->security) {
		return copy_sec_ctx(xp->security, skb);
	}
	return 0;
}
static inline size_t userpolicy_type_attrsize(void)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	return nla_total_size(sizeof(struct xfrm_userpolicy_type));
#else
	return 0;
#endif
}

#ifdef CONFIG_XFRM_SUB_POLICY
static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
	struct xfrm_userpolicy_type upt = {
		.type = type,
	};

	return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
}

#else
static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
	return 0;
}
#endif

static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
{
	struct xfrm_dump_info *sp = ptr;
	struct xfrm_userpolicy_info *p;
	struct sk_buff *in_skb = sp->in_skb;
	struct sk_buff *skb = sp->out_skb;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).pid, sp->nlmsg_seq,
			XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	p = nlmsg_data(nlh);
	copy_to_user_policy(xp, p, dir);
	if (copy_to_user_tmpl(xp, skb) < 0)
		goto nlmsg_failure;
	if (copy_to_user_sec_ctx(xp, skb))
		goto nlmsg_failure;
	if (copy_to_user_policy_type(xp->type, skb) < 0)
		goto nlmsg_failure;
	if (xfrm_mark_put(skb, &xp->mark))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
nlmsg_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int xfrm_dump_policy_done(struct netlink_callback *cb)
{
	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];

	xfrm_policy_walk_done(walk);
	return 0;
}

static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1];
	struct xfrm_dump_info info;

	BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) >
		     sizeof(cb->args) - sizeof(cb->args[0]));

	info.in_skb = cb->skb;
	info.out_skb = skb;
	info.nlmsg_seq = cb->nlh->nlmsg_seq;
	info.nlmsg_flags = NLM_F_MULTI;

	if (!cb->args[0]) {
		cb->args[0] = 1;
		xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
	}

	(void) xfrm_policy_walk(net, walk, dump_one_policy, &info);

	return skb->len;
}

static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
					   struct xfrm_policy *xp,
					   int dir, u32 seq)
{
	struct xfrm_dump_info info;
	struct sk_buff *skb;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	info.in_skb = in_skb;
	info.out_skb = skb;
	info.nlmsg_seq = seq;
	info.nlmsg_flags = 0;

	if (dump_one_policy(xp, dir, 0, &info) < 0) {
		kfree_skb(skb);
		return NULL;
	}

	return skb;
}

static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_userpolicy_id *p;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;
	struct km_event c;
	int delete;
	struct xfrm_mark m;
	u32 mark = xfrm_mark_get(attrs, &m);

	p = nlmsg_data(nlh);
	delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;

	err = copy_from_user_policy_type(&type, attrs);
	if (err)
		return err;

	err = verify_policy_dir(p->dir);
	if (err)
		return err;

	if (p->index)
		xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, delete, &err);
	else {
		struct nlattr *rt = attrs[XFRMA_SEC_CTX];
		struct xfrm_sec_ctx *ctx;

		err = verify_sec_ctx_len(attrs);
		if (err)
			return err;

		ctx = NULL;
		if (rt) {
			struct xfrm_user_sec_ctx *uctx = nla_data(rt);

			err = security_xfrm_policy_alloc(&ctx, uctx);
			if (err)
				return err;
		}
		xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir, &p->sel,
					   ctx, delete, &err);
		security_xfrm_policy_free(ctx);
	}
	if (xp == NULL)
		return -ENOENT;

	if (!delete) {
		struct sk_buff *resp_skb;

		resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
		if (IS_ERR(resp_skb)) {
			err = PTR_ERR(resp_skb);
		} else {
			err = nlmsg_unicast(net->xfrm.nlsk, resp_skb,
					    NETLINK_CB(skb).pid);
		}
	} else {
		uid_t loginuid = audit_get_loginuid(current);
		u32 sessionid = audit_get_sessionid(current);
		u32 sid;

		security_task_getsecid(current, &sid);
		xfrm_audit_policy_delete(xp, err ? 0 : 1, loginuid, sessionid,
					 sid);

		if (err != 0)
			goto out;

		c.data.byid = p->index;
		c.event = nlh->nlmsg_type;
		c.seq = nlh->nlmsg_seq;
		c.pid = nlh->nlmsg_pid;
		km_policy_notify(xp, p->dir, &c);
	}

out:
	xfrm_pol_put(xp);
	return err;
}

static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
			 struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct km_event c;
	struct xfrm_usersa_flush *p = nlmsg_data(nlh);
	struct xfrm_audit audit_info;
	int err;

	audit_info.loginuid = audit_get_loginuid(current);
	audit_info.sessionid = audit_get_sessionid(current);
	security_task_getsecid(current, &audit_info.secid);
	err = xfrm_state_flush(net, p->proto, &audit_info);
	if (err) {
		if (err == -ESRCH) /* empty table */
			return 0;
		return err;
	}
	c.data.proto = p->proto;
	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.pid = nlh->nlmsg_pid;
	c.net = net;
	km_state_notify(NULL, &c);

	return 0;
}

static inline size_t xfrm_aevent_msgsize(struct xfrm_state *x)
{
	size_t replay_size = x->replay_esn ?
			     xfrm_replay_state_esn_len(x->replay_esn) :
			     sizeof(struct xfrm_replay_state);

	return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
	       + nla_total_size(replay_size)
	       + nla_total_size(sizeof(struct xfrm_lifetime_cur))
	       + nla_total_size(sizeof(struct xfrm_mark))
	       + nla_total_size(4) /* XFRM_AE_RTHR */
	       + nla_total_size(4); /* XFRM_AE_ETHR */
}

static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
{
	struct xfrm_aevent_id *id;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	id = nlmsg_data(nlh);
	memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr));
	id->sa_id.spi = x->id.spi;
	id->sa_id.family = x->props.family;
	id->sa_id.proto = x->id.proto;
	memcpy(&id->saddr, &x->props.saddr, sizeof(x->props.saddr));
	id->reqid = x->props.reqid;
	id->flags = c->data.aevent;

	if (x->replay_esn)
		NLA_PUT(skb, XFRMA_REPLAY_ESN_VAL,
			xfrm_replay_state_esn_len(x->replay_esn),
			x->replay_esn);
	else
		NLA_PUT(skb, XFRMA_REPLAY_VAL, sizeof(x->replay), &x->replay);

	NLA_PUT(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft);

	if (id->flags & XFRM_AE_RTHR)
		NLA_PUT_U32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);

	if (id->flags & XFRM_AE_ETHR)
		NLA_PUT_U32(skb, XFRMA_ETIMER_THRESH,
			    x->replay_maxage * 10 / HZ);

	if (xfrm_mark_put(skb, &x->mark))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct sk_buff *r_skb;
	int err;
	struct km_event c;
	u32 mark;
	struct xfrm_mark m;
	struct xfrm_aevent_id *p = nlmsg_data(nlh);
	struct xfrm_usersa_id *id = &p->sa_id;

	mark = xfrm_mark_get(attrs, &m);

	x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family);
	if (x == NULL)
		return -ESRCH;

	r_skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
	if (r_skb == NULL) {
		xfrm_state_put(x);
		return -ENOMEM;
	}

	/*
	 * XXX: is this lock really needed - none of the other
	 * gets lock (the concern is things getting updated
	 * while we are still reading) - jhs
	 */
	spin_lock_bh(&x->lock);
	c.data.aevent = p->flags;
	c.seq = nlh->nlmsg_seq;
	c.pid = nlh->nlmsg_pid;

	if (build_aevent(r_skb, x, &c) < 0)
		BUG();
	err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).pid);
	spin_unlock_bh(&x->lock);
	xfrm_state_put(x);
	return err;
}

static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
		       struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct km_event c;
	int err = -EINVAL;
	u32 mark = 0;
	struct xfrm_mark m;
	struct xfrm_aevent_id *p = nlmsg_data(nlh);
	struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
	struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
	struct nlattr *lt = attrs[XFRMA_LTIME_VAL];

	if (!lt && !rp && !re)
		return err;

	/* pedantic mode - thou shalt sayeth replaceth */
	if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
		return err;

	mark = xfrm_mark_get(attrs, &m);

	x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
	if (x == NULL)
		return -ESRCH;

	if (x->km.state != XFRM_STATE_VALID)
		goto out;

	err = xfrm_replay_verify_len(x->replay_esn, rp);
	if (err)
		goto out;

	spin_lock_bh(&x->lock);
	xfrm_update_ae_params(x, attrs);
	spin_unlock_bh(&x->lock);

	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.pid = nlh->nlmsg_pid;
	c.data.aevent = XFRM_AE_CU;
	km_state_notify(x, &c);
	err = 0;
out:
	xfrm_state_put(x);
	return err;
}

static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct km_event c;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;
	struct xfrm_audit audit_info;

	err = copy_from_user_policy_type(&type, attrs);
	if (err)
		return err;

	audit_info.loginuid = audit_get_loginuid(current);
	audit_info.sessionid = audit_get_sessionid(current);
	security_task_getsecid(current, &audit_info.secid);
	err = xfrm_policy_flush(net, type, &audit_info);
	if (err) {
		if (err == -ESRCH) /* empty table */
			return 0;
		return err;
	}

	c.data.type = type;
	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.pid = nlh->nlmsg_pid;
	c.net = net;
	km_policy_notify(NULL, 0, &c);
	return 0;
}

static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
			       struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_user_polexpire *up = nlmsg_data(nlh);
	struct xfrm_userpolicy_info *p = &up->pol;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err = -ENOENT;
	struct xfrm_mark m;
	u32 mark = xfrm_mark_get(attrs, &m);

	err = copy_from_user_policy_type(&type, attrs);
	if (err)
		return err;

	err = verify_policy_dir(p->dir);
	if (err)
		return err;

	if (p->index)
		xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err);
	else {
		struct nlattr *rt = attrs[XFRMA_SEC_CTX];
		struct xfrm_sec_ctx *ctx;

		err = verify_sec_ctx_len(attrs);
		if (err)
			return err;

		ctx = NULL;
		if (rt) {
			struct xfrm_user_sec_ctx *uctx = nla_data(rt);

			err = security_xfrm_policy_alloc(&ctx, uctx);
			if (err)
				return err;
		}
		xp = xfrm_policy_bysel_ctx(net, mark, type, p->dir,
					   &p->sel, ctx, 0, &err);
		security_xfrm_policy_free(ctx);
	}
	if (xp == NULL)
		return -ENOENT;

	if (unlikely(xp->walk.dead))
		goto out;

	err = 0;
	if (up->hard) {
		uid_t loginuid = audit_get_loginuid(current);
		u32 sessionid = audit_get_sessionid(current);
		u32 sid;

		security_task_getsecid(current, &sid);
		xfrm_policy_delete(xp, p->dir);
		xfrm_audit_policy_delete(xp, 1, loginuid, sessionid, sid);

	} else {
		// reset the timers here?
		WARN(1, "Dont know what to do with soft policy expire\n");
	}
	km_policy_expired(xp, p->dir, up->hard, current->pid);

out:
	xfrm_pol_put(xp);
	return err;
}

static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	int err;
	struct xfrm_user_expire *ue = nlmsg_data(nlh);
	struct xfrm_usersa_info *p = &ue->state;
	struct xfrm_mark m;
	u32 mark = xfrm_mark_get(attrs, &m);

	x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);

	err = -ENOENT;
	if (x == NULL)
		return err;

	spin_lock_bh(&x->lock);
	err = -EINVAL;
	if (x->km.state != XFRM_STATE_VALID)
		goto out;
	km_state_expired(x, ue->hard, current->pid);

	if (ue->hard) {
		uid_t loginuid = audit_get_loginuid(current);
		u32 sessionid = audit_get_sessionid(current);
		u32 sid;

		security_task_getsecid(current, &sid);
		__xfrm_state_delete(x);
		xfrm_audit_state_delete(x, 1, loginuid, sessionid, sid);
	}
	err = 0;
out:
	spin_unlock_bh(&x->lock);
	xfrm_state_put(x);
	return err;
}

static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_user_tmpl *ut;
	int i;
	struct nlattr *rt = attrs[XFRMA_TMPL];
	struct xfrm_mark mark;

	struct xfrm_user_acquire *ua = nlmsg_data(nlh);
	struct xfrm_state *x = xfrm_state_alloc(net);
	int err = -ENOMEM;

	if (!x)
		goto nomem;

	xfrm_mark_get(attrs, &mark);

	err = verify_newpolicy_info(&ua->policy);
	if (err)
		goto bad_policy;

	/* build an XP */
	xp = xfrm_policy_construct(net, &ua->policy, attrs, &err);
	if (!xp)
		goto free_state;

	memcpy(&x->id, &ua->id, sizeof(ua->id));
	memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
	memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
	xp->mark.m = x->mark.m = mark.m;
	xp->mark.v = x->mark.v = mark.v;
	ut = nla_data(rt);
	/* extract the templates and for each call km_key */
	for (i = 0; i < xp->xfrm_nr; i++, ut++) {
		struct xfrm_tmpl *t = &xp->xfrm_vec[i];
		memcpy(&x->id, &t->id, sizeof(x->id));
		x->props.mode = t->mode;
		x->props.reqid = t->reqid;
		x->props.family = ut->family;
		t->aalgos = ua->aalgos;
		t->ealgos = ua->ealgos;
		t->calgos = ua->calgos;
		err = km_query(x, t, xp);

	}

	kfree(x);
	kfree(xp);

	return 0;

bad_policy:
	WARN(1, "BAD policy passed\n");
free_state:
	kfree(x);
nomem:
	return err;
}

#ifdef CONFIG_XFRM_MIGRATE
static int copy_from_user_migrate(struct xfrm_migrate *ma,
				  struct xfrm_kmaddress *k,
				  struct nlattr **attrs, int *num)
{
	struct nlattr *rt = attrs[XFRMA_MIGRATE];
	struct xfrm_user_migrate *um;
	int i, num_migrate;

	if (k != NULL) {
		struct xfrm_user_kmaddress *uk;

		uk = nla_data(attrs[XFRMA_KMADDRESS]);
		memcpy(&k->local, &uk->local, sizeof(k->local));
		memcpy(&k->remote, &uk->remote, sizeof(k->remote));
		k->family = uk->family;
		k->reserved = uk->reserved;
	}

	um = nla_data(rt);
	num_migrate = nla_len(rt) / sizeof(*um);

	if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++, um++, ma++) {
		memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
		memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
		memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
		memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));

		ma->proto = um->proto;
		ma->mode = um->mode;
		ma->reqid = um->reqid;

		ma->old_family = um->old_family;
		ma->new_family = um->new_family;
	}

	*num = i;
	return 0;
}

static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs)
{
	struct xfrm_userpolicy_id *pi = nlmsg_data(nlh);
	struct xfrm_migrate m[XFRM_MAX_DEPTH];
	struct xfrm_kmaddress km, *kmp;
	u8 type;
	int err;
	int n = 0;

	if (attrs[XFRMA_MIGRATE] == NULL)
		return -EINVAL;

	kmp = attrs[XFRMA_KMADDRESS] ? &km : NULL;

	err = copy_from_user_policy_type(&type, attrs);
	if (err)
		return err;

	err = copy_from_user_migrate((struct xfrm_migrate *)m, kmp, attrs, &n);
	if (err)
		return err;

	if (!n)
		return 0;

	xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp);

	return 0;
}
#else
static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs)
{
	return -ENOPROTOOPT;
}
#endif

#ifdef CONFIG_XFRM_MIGRATE
static int copy_to_user_migrate(const struct xfrm_migrate *m, struct sk_buff *skb)
{
	struct xfrm_user_migrate um;

	memset(&um, 0, sizeof(um));
	um.proto = m->proto;
	um.mode = m->mode;
	um.reqid = m->reqid;
	um.old_family = m->old_family;
	memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
	memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
	um.new_family = m->new_family;
	memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
	memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));

	return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
}

static int copy_to_user_kmaddress(const struct xfrm_kmaddress *k, struct sk_buff *skb)
{
	struct xfrm_user_kmaddress uk;

	memset(&uk, 0, sizeof(uk));
	uk.family = k->family;
	uk.reserved = k->reserved;
	memcpy(&uk.local, &k->local, sizeof(uk.local));
	memcpy(&uk.remote, &k->remote, sizeof(uk.remote));

	return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk);
}

static inline size_t xfrm_migrate_msgsize(int num_migrate, int with_kma)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id))
	       + (with_kma ? nla_total_size(sizeof(struct xfrm_kmaddress)) : 0)
	       + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate)
	       + userpolicy_type_attrsize();
}

static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
			 int num_migrate, const struct xfrm_kmaddress *k,
			 const struct xfrm_selector *sel, u8 dir, u8 type)
{
	const struct xfrm_migrate *mp;
	struct xfrm_userpolicy_id *pol_id;
	struct nlmsghdr *nlh;
	int i;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	pol_id = nlmsg_data(nlh);
	/* copy data from selector, dir, and type to the pol_id */
	memset(pol_id, 0, sizeof(*pol_id));
	memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
	pol_id->dir = dir;

	if (k != NULL && (copy_to_user_kmaddress(k, skb) < 0))
		goto nlmsg_failure;

	if (copy_to_user_policy_type(type, skb) < 0)
		goto nlmsg_failure;

	for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
		if (copy_to_user_migrate(mp, skb) < 0)
			goto nlmsg_failure;
	}

	return nlmsg_end(skb, nlh);
nlmsg_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
			     const struct xfrm_migrate *m, int num_migrate,
			     const struct xfrm_kmaddress *k)
{
	struct net *net = &init_net;
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	/* build migrate */
	if (build_migrate(skb, m, num_migrate, k, sel, dir, type) < 0)
		BUG();

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MIGRATE, GFP_ATOMIC);
}
#else
static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
			     const struct xfrm_migrate *m, int num_migrate,
			     const struct xfrm_kmaddress *k)
{
	return -ENOPROTOOPT;
}
#endif

#define XMSGSIZE(type) sizeof(struct type)

static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
	[XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
	[XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
	[XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
	[XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
	[XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
	[XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
	[XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
	[XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
	[XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
	[XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
	[XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
	[XFRM_MSG_NEWAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
	[XFRM_MSG_GETAE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
	[XFRM_MSG_REPORT - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
	[XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = sizeof(u32),
	[XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = sizeof(u32),
};

#undef XMSGSIZE

static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
	[XFRMA_SA]		= { .len = sizeof(struct xfrm_usersa_info)},
	[XFRMA_POLICY]		= { .len = sizeof(struct xfrm_userpolicy_info)},
	[XFRMA_LASTUSED]	= { .type = NLA_U64},
	[XFRMA_ALG_AUTH_TRUNC]	= { .len = sizeof(struct xfrm_algo_auth)},
	[XFRMA_ALG_AEAD]	= { .len = sizeof(struct xfrm_algo_aead) },
	[XFRMA_ALG_AUTH]	= { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ALG_CRYPT]	= { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ALG_COMP]	= { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ENCAP]		= { .len = sizeof(struct xfrm_encap_tmpl) },
	[XFRMA_TMPL]		= { .len = sizeof(struct xfrm_user_tmpl) },
	[XFRMA_SEC_CTX]		= { .len = sizeof(struct xfrm_sec_ctx) },
	[XFRMA_LTIME_VAL]	= { .len = sizeof(struct xfrm_lifetime_cur) },
	[XFRMA_REPLAY_VAL]	= { .len = sizeof(struct xfrm_replay_state) },
	[XFRMA_REPLAY_THRESH]	= { .type = NLA_U32 },
	[XFRMA_ETIMER_THRESH]	= { .type = NLA_U32 },
	[XFRMA_SRCADDR]		= { .len = sizeof(xfrm_address_t) },
	[XFRMA_COADDR]		= { .len = sizeof(xfrm_address_t) },
	[XFRMA_POLICY_TYPE]	= { .len = sizeof(struct xfrm_userpolicy_type)},
	[XFRMA_MIGRATE]		= { .len = sizeof(struct xfrm_user_migrate) },
	[XFRMA_KMADDRESS]	= { .len = sizeof(struct xfrm_user_kmaddress) },
	[XFRMA_MARK]		= { .len = sizeof(struct xfrm_mark) },
	[XFRMA_TFCPAD]		= { .type = NLA_U32 },
	[XFRMA_REPLAY_ESN_VAL]	= { .len = sizeof(struct xfrm_replay_state_esn) },
};

static struct xfrm_link {
	int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
	int (*dump)(struct sk_buff *, struct netlink_callback *);
	int (*done)(struct netlink_callback *);
} xfrm_dispatch[XFRM_NR_MSGTYPES] = {
static struct xfrm_link {
	int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
	int (*dump)(struct sk_buff *, struct netlink_callback *);
	int (*done)(struct netlink_callback *);
} xfrm_dispatch[XFRM_NR_MSGTYPES] = {
	[XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
	[XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa },
	[XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
					     .dump = xfrm_dump_sa,
					     .done = xfrm_dump_sa_done },
	[XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
	[XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
	[XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
						 .dump = xfrm_dump_policy,
						 .done = xfrm_dump_policy_done },
	[XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
	[XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire },
	[XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
	[XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
	[XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
	[XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
	[XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa },
	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
	[XFRM_MSG_NEWAE - XFRM_MSG_BASE] = { .doit = xfrm_new_ae },
	[XFRM_MSG_GETAE - XFRM_MSG_BASE] = { .doit = xfrm_get_ae },
	[XFRM_MSG_MIGRATE - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate },
	[XFRM_MSG_GETSADINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo },
	[XFRM_MSG_GETSPDINFO - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo },
};

static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *attrs[XFRMA_MAX+1];
	struct xfrm_link *link;
	int type, err;

	type = nlh->nlmsg_type;
	if (type > XFRM_MSG_MAX)
		return -EINVAL;

	type -= XFRM_MSG_BASE;
	link = &xfrm_dispatch[type];

	/* All operations require privileges, even GET */
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
	     type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
	    (nlh->nlmsg_flags & NLM_F_DUMP)) {
		if (link->dump == NULL)
			return -EINVAL;

		return netlink_dump_start(net->xfrm.nlsk, skb, nlh,
					  link->dump, link->done, 0);
	}

	err = nlmsg_parse(nlh, xfrm_msg_min[type], attrs, XFRMA_MAX,
			  xfrma_policy);
	if (err < 0)
		return err;

	if (link->doit == NULL)
		return -EINVAL;

	return link->doit(skb, nlh, attrs);
}

static void xfrm_netlink_rcv(struct sk_buff *skb)
{
	mutex_lock(&xfrm_cfg_mutex);
	netlink_rcv_skb(skb, &xfrm_user_rcv_msg);
	mutex_unlock(&xfrm_cfg_mutex);
}

static inline size_t xfrm_expire_msgsize(void)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_expire))
	       + nla_total_size(sizeof(struct xfrm_mark));
}

static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
{
	struct xfrm_user_expire *ue;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ue = nlmsg_data(nlh);
	copy_to_user_state(x, &ue->state);
	ue->hard = (c->data.hard != 0) ? 1 : 0;
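	/* The SA's mark (if set) is appended via xfrm_mark_put(); room for it
	 * is reserved in xfrm_expire_msgsize() above. */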
	if (xfrm_mark_put(skb, &x->mark))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	return -EMSGSIZE;
}

static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_expire(skb, x, c) < 0) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
}

static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_aevent(skb, x, c) < 0)
		BUG();

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_AEVENTS, GFP_ATOMIC);
}

static int xfrm_notify_sa_flush(const struct km_event *c)
{
	struct net *net = c->net;
	struct xfrm_usersa_flush *p;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush));

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
	if (nlh == NULL) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	p = nlmsg_data(nlh);
	p->proto = c->data.proto;

	nlmsg_end(skb, nlh);

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);
}

static inline size_t xfrm_sa_len(struct xfrm_state *x)
{
	size_t l = 0;
	if (x->aead)
		l += nla_total_size(aead_len(x->aead));
	if (x->aalg) {
		l += nla_total_size(sizeof(struct xfrm_algo) +
				    (x->aalg->alg_key_len + 7) / 8);
		l += nla_total_size(xfrm_alg_auth_len(x->aalg));
	}
	if (x->ealg)
		l += nla_total_size(xfrm_alg_len(x->ealg));
	if (x->calg)
		l += nla_total_size(sizeof(*x->calg));
	if (x->encap)
		l += nla_total_size(sizeof(*x->encap));
	if (x->tfcpad)
		l += nla_total_size(sizeof(x->tfcpad));
	if (x->replay_esn)
		l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn));
	if (x->security)
		l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
				    x->security->ctx_len);
	if (x->coaddr)
		l += nla_total_size(sizeof(*x->coaddr));

	/* Must count x->lastused as it may become non-zero behind our back. */
	l += nla_total_size(sizeof(u64));

	return l;
}

static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
{
	struct net *net = xs_net(x);
	struct xfrm_usersa_info *p;
	struct xfrm_usersa_id *id;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int len = xfrm_sa_len(x);
	int headlen;

	headlen = sizeof(*p);
	if (c->event == XFRM_MSG_DELSA) {
		len += nla_total_size(headlen);
		headlen = sizeof(*id);
		len += nla_total_size(sizeof(struct xfrm_mark));
	}
	len += NLMSG_ALIGN(headlen);

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
	if (nlh == NULL)
		goto nla_put_failure;

	p = nlmsg_data(nlh);
	if (c->event == XFRM_MSG_DELSA) {
		struct nlattr *attr;

		id = nlmsg_data(nlh);
		memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
		id->spi = x->id.spi;
		id->family = x->props.family;
		id->proto = x->id.proto;

		attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
		if (attr == NULL)
			goto nla_put_failure;

		p = nla_data(attr);
	}

	if (copy_to_user_state_extra(x, p, skb))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_SA, GFP_ATOMIC);

nla_put_failure:
	/* Somebody screwed up with xfrm_sa_len! */
	WARN_ON(1);
	kfree_skb(skb);
	return -1;
}

static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c)
{

	switch (c->event) {
	case XFRM_MSG_EXPIRE:
		return xfrm_exp_state_notify(x, c);
	case XFRM_MSG_NEWAE:
		return xfrm_aevent_state_notify(x, c);
	case XFRM_MSG_DELSA:
	case XFRM_MSG_UPDSA:
	case XFRM_MSG_NEWSA:
		return xfrm_notify_sa(x, c);
	case XFRM_MSG_FLUSHSA:
		return xfrm_notify_sa_flush(c);
	default:
		printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n",
		       c->event);
		break;
	}

	return 0;

}

static inline size_t xfrm_acquire_msgsize(struct xfrm_state *x,
					  struct xfrm_policy *xp)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
	       + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
	       + nla_total_size(sizeof(struct xfrm_mark))
	       + nla_total_size(xfrm_user_sec_ctx_size(x->security))
	       + userpolicy_type_attrsize();
}

static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
			 struct xfrm_tmpl *xt, struct xfrm_policy *xp,
			 int dir)
{
	struct xfrm_user_acquire *ua;
	struct nlmsghdr *nlh;
	__u32 seq = xfrm_get_acqseq();

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ua = nlmsg_data(nlh);
	memcpy(&ua->id, &x->id, sizeof(ua->id));
	memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
	memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
	copy_to_user_policy(xp, &ua->policy, dir);
	ua->aalgos = xt->aalgos;
	ua->ealgos = xt->ealgos;
	ua->calgos = xt->calgos;
	ua->seq = x->km.seq = seq;

	if (copy_to_user_tmpl(xp, skb) < 0)
		goto nlmsg_failure;
	if (copy_to_user_state_sec_ctx(x, skb))
		goto nlmsg_failure;
	if (copy_to_user_policy_type(xp->type, skb) < 0)
		goto nlmsg_failure;
	if (xfrm_mark_put(skb, &xp->mark))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);
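	/* Any failure above lands here; nlmsg_cancel() trims the partially
	 * built message back to where nlmsg_put() started. */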
nla_put_failure:
nlmsg_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
			     struct xfrm_policy *xp, int dir)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_acquire(skb, x, xt, xp, dir) < 0)
		BUG();

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_ACQUIRE, GFP_ATOMIC);
}

/* User gives us xfrm_user_policy_info followed by an array of 0
 * or more templates.
 */
static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
					       u8 *data, int len, int *dir)
{
	struct net *net = sock_net(sk);
	struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
	struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
	struct xfrm_policy *xp;
	int nr;

	switch (sk->sk_family) {
	case AF_INET:
		if (opt != IP_XFRM_POLICY) {
			*dir = -EOPNOTSUPP;
			return NULL;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		if (opt != IPV6_XFRM_POLICY) {
			*dir = -EOPNOTSUPP;
			return NULL;
		}
		break;
#endif
	default:
		*dir = -EINVAL;
		return NULL;
	}

	*dir = -EINVAL;

	if (len < sizeof(*p) ||
	    verify_newpolicy_info(p))
		return NULL;

	nr = ((len - sizeof(*p)) / sizeof(*ut));
	if (validate_tmpl(nr, ut, p->sel.family))
		return NULL;

	if (p->dir > XFRM_POLICY_OUT)
		return NULL;

	xp = xfrm_policy_alloc(net, GFP_ATOMIC);
	if (xp == NULL) {
		*dir = -ENOBUFS;
		return NULL;
	}

	copy_from_user_policy(xp, p);
	xp->type = XFRM_POLICY_TYPE_MAIN;
	copy_templates(xp, ut, nr);

	*dir = p->dir;

	return xp;
}

static inline size_t xfrm_polexpire_msgsize(struct xfrm_policy *xp)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
	       + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
	       + nla_total_size(xfrm_user_sec_ctx_size(xp->security))
	       + nla_total_size(sizeof(struct xfrm_mark))
	       + userpolicy_type_attrsize();
}

static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
			   int dir, const struct km_event *c)
{
	struct xfrm_user_polexpire *upe;
	struct nlmsghdr *nlh;
	int hard = c->data.hard;

	nlh = nlmsg_put(skb, c->pid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	upe = nlmsg_data(nlh);
	copy_to_user_policy(xp, &upe->pol, dir);
	if (copy_to_user_tmpl(xp, skb) < 0)
		goto nlmsg_failure;
	if (copy_to_user_sec_ctx(xp, skb))
		goto nlmsg_failure;
	if (copy_to_user_policy_type(xp->type, skb) < 0)
		goto nlmsg_failure;
	if (xfrm_mark_put(skb, &xp->mark))
		goto nla_put_failure;
	upe->hard = !!hard;

	return nlmsg_end(skb, nlh);

nla_put_failure:
nlmsg_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
	struct net *net = xp_net(xp);
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_polexpire(skb, xp, dir, c) < 0)
		BUG();
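	/* The skb was sized by xfrm_polexpire_msgsize(), so a failure in
	 * build_polexpire() would indicate the size estimate and the message
	 * builder have gone out of sync. */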
	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_EXPIRE, GFP_ATOMIC);
}

static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
	struct net *net = xp_net(xp);
	struct xfrm_userpolicy_info *p;
	struct xfrm_userpolicy_id *id;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
	int headlen;

	headlen = sizeof(*p);
	if (c->event == XFRM_MSG_DELPOLICY) {
		len += nla_total_size(headlen);
		headlen = sizeof(*id);
	}
	len += userpolicy_type_attrsize();
	len += nla_total_size(sizeof(struct xfrm_mark));
	len += NLMSG_ALIGN(headlen);

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->pid, c->seq, c->event, headlen, 0);
	if (nlh == NULL)
		goto nlmsg_failure;

	p = nlmsg_data(nlh);
	if (c->event == XFRM_MSG_DELPOLICY) {
		struct nlattr *attr;

		id = nlmsg_data(nlh);
		memset(id, 0, sizeof(*id));
		id->dir = dir;
		if (c->data.byid)
			id->index = xp->index;
		else
			memcpy(&id->sel, &xp->selector, sizeof(id->sel));

		attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
		if (attr == NULL)
			goto nlmsg_failure;

		p = nla_data(attr);
	}

	copy_to_user_policy(xp, p, dir);
	if (copy_to_user_tmpl(xp, skb) < 0)
		goto nlmsg_failure;
	if (copy_to_user_policy_type(xp->type, skb) < 0)
		goto nlmsg_failure;

	if (xfrm_mark_put(skb, &xp->mark))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);

nla_put_failure:
nlmsg_failure:
	kfree_skb(skb);
	return -1;
}

static int xfrm_notify_policy_flush(const struct km_event *c)
{
	struct net *net = c->net;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;

	skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->pid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
	if (nlh == NULL)
		goto nlmsg_failure;
	if (copy_to_user_policy_type(c->data.type, skb) < 0)
		goto nlmsg_failure;

	nlmsg_end(skb, nlh);

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_POLICY, GFP_ATOMIC);

nlmsg_failure:
	kfree_skb(skb);
	return -1;
}

static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
{

	switch (c->event) {
	case XFRM_MSG_NEWPOLICY:
	case XFRM_MSG_UPDPOLICY:
	case XFRM_MSG_DELPOLICY:
		return xfrm_notify_policy(xp, dir, c);
	case XFRM_MSG_FLUSHPOLICY:
		return xfrm_notify_policy_flush(c);
	case XFRM_MSG_POLEXPIRE:
		return xfrm_exp_policy_notify(xp, dir, c);
	default:
		printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n",
		       c->event);
	}

	return 0;

}

static inline size_t xfrm_report_msgsize(void)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_report));
}

static int build_report(struct sk_buff *skb, u8 proto,
			struct xfrm_selector *sel, xfrm_address_t *addr)
{
	struct xfrm_user_report *ur;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ur = nlmsg_data(nlh);
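	/* Fill the report body; the optional care-of address, if supplied,
	 * is appended as an XFRMA_COADDR attribute below. */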
	ur->proto = proto;
	memcpy(&ur->sel, sel, sizeof(ur->sel));

	if (addr)
		NLA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static int xfrm_send_report(struct net *net, u8 proto,
			    struct xfrm_selector *sel, xfrm_address_t *addr)
{
	struct sk_buff *skb;

	skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_report(skb, proto, sel, addr) < 0)
		BUG();

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_REPORT, GFP_ATOMIC);
}

static inline size_t xfrm_mapping_msgsize(void)
{
	return NLMSG_ALIGN(sizeof(struct xfrm_user_mapping));
}

static int build_mapping(struct sk_buff *skb, struct xfrm_state *x,
			 xfrm_address_t *new_saddr, __be16 new_sport)
{
	struct xfrm_user_mapping *um;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MAPPING, sizeof(*um), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	um = nlmsg_data(nlh);

	memcpy(&um->id.daddr, &x->id.daddr, sizeof(um->id.daddr));
	um->id.spi = x->id.spi;
	um->id.family = x->props.family;
	um->id.proto = x->id.proto;
	memcpy(&um->new_saddr, new_saddr, sizeof(um->new_saddr));
	memcpy(&um->old_saddr, &x->props.saddr, sizeof(um->old_saddr));
	um->new_sport = new_sport;
	um->old_sport = x->encap->encap_sport;
	um->reqid = x->props.reqid;

	return nlmsg_end(skb, nlh);
}

static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
			     __be16 sport)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;

	if (x->id.proto != IPPROTO_ESP)
		return -EINVAL;

	if (!x->encap)
		return -EINVAL;

	skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	if (build_mapping(skb, x, ipaddr, sport) < 0)
		BUG();

	return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MAPPING, GFP_ATOMIC);
}

static struct xfrm_mgr netlink_mgr = {
	.id = "netlink",
	.notify = xfrm_send_state_notify,
	.acquire = xfrm_send_acquire,
	.compile_policy = xfrm_compile_policy,
	.notify_policy = xfrm_send_policy_notify,
	.report = xfrm_send_report,
	.migrate = xfrm_send_migrate,
	.new_mapping = xfrm_send_mapping,
};

static int __net_init xfrm_user_net_init(struct net *net)
{
	struct sock *nlsk;

	nlsk = netlink_kernel_create(net, NETLINK_XFRM, XFRMNLGRP_MAX,
				     xfrm_netlink_rcv, NULL, THIS_MODULE);
	if (nlsk == NULL)
		return -ENOMEM;
	net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
	rcu_assign_pointer(net->xfrm.nlsk, nlsk);
	return 0;
}

static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
{
	struct net *net;
	list_for_each_entry(net, net_exit_list, exit_list)
		RCU_INIT_POINTER(net->xfrm.nlsk, NULL);
	synchronize_net();
	list_for_each_entry(net, net_exit_list, exit_list)
		netlink_kernel_release(net->xfrm.nlsk_stash);
}

static struct pernet_operations xfrm_user_net_ops = {
	.init = xfrm_user_net_init,
	.exit_batch = xfrm_user_net_exit,
};
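/*
 * Module init/exit: the per-net netlink socket is set up first, then the
 * netlink key manager is registered on top of it; teardown runs in reverse
 * order, and init unwinds the pernet registration if xfrm_register_km()
 * fails.
 */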
static int __init xfrm_user_init(void)
{
	int rv;

	printk(KERN_INFO "Initializing XFRM netlink socket\n");

	rv = register_pernet_subsys(&xfrm_user_net_ops);
	if (rv < 0)
		return rv;
	rv = xfrm_register_km(&netlink_mgr);
	if (rv < 0)
		unregister_pernet_subsys(&xfrm_user_net_ops);
	return rv;
}

static void __exit xfrm_user_exit(void)
{
	xfrm_unregister_km(&netlink_mgr);
	unregister_pernet_subsys(&xfrm_user_net_ops);
}

module_init(xfrm_user_init);
module_exit(xfrm_user_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);