/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Ng Peng Nam Sean
 * Copyright (c) 2022 Alexander V. Chernikov <melifaro@FreeBSD.org>
 * Copyright (c) 2023 Gleb Smirnoff <glebius@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This file contains socket and protocol bindings for netlink.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
#include <sys/domain.h>
#include <sys/jail.h>
#include <sys/mbuf.h>
#include <sys/osd.h>
#include <sys/protosw.h>
#include <sys/proc.h>
#include <sys/ck.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysent.h>
#include <sys/syslog.h>
#include <sys/priv.h> /* priv_check */
#include <sys/uio.h>

#include <netlink/netlink.h>
#include <netlink/netlink_ctl.h>
#include <netlink/netlink_var.h>

#define	DEBUG_MOD_NAME	nl_domain
#define	DEBUG_MAX_LEVEL	LOG_DEBUG3
#include <netlink/netlink_debug.h>
_DECLARE_DEBUG(LOG_INFO);

_Static_assert((NLP_MAX_GROUPS % 64) == 0,
    "NLP_MAX_GROUPS has to be multiple of 64");
_Static_assert(NLP_MAX_GROUPS >= 64,
    "NLP_MAX_GROUPS has to be at least 64");

#define	NLCTL_TRACKER		struct rm_priotracker nl_tracker
#define	NLCTL_RLOCK(_ctl)	rm_rlock(&((_ctl)->ctl_lock), &nl_tracker)
#define	NLCTL_RUNLOCK(_ctl)	rm_runlock(&((_ctl)->ctl_lock), &nl_tracker)

#define	NLCTL_WLOCK(_ctl)	rm_wlock(&((_ctl)->ctl_lock))
#define	NLCTL_WUNLOCK(_ctl)	rm_wunlock(&((_ctl)->ctl_lock))

static u_long nl_sendspace = NLSNDQ;
SYSCTL_ULONG(_net_netlink, OID_AUTO, sendspace, CTLFLAG_RW, &nl_sendspace, 0,
    "Default netlink socket send space");

static u_long nl_recvspace = NLSNDQ;
SYSCTL_ULONG(_net_netlink, OID_AUTO, recvspace, CTLFLAG_RW, &nl_recvspace, 0,
    "Default netlink socket receive space");

extern u_long sb_max_adj;
static u_long nl_maxsockbuf = 512 * 1024 * 1024; /* 512M, XXX: init based on physmem */
static int sysctl_handle_nl_maxsockbuf(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_net_netlink, OID_AUTO, nl_maxsockbuf,
    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, &nl_maxsockbuf, 0,
    sysctl_handle_nl_maxsockbuf, "LU",
    "Maximum Netlink socket buffer size");
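
/*
 * Illustrative usage, not part of this file's logic: the knobs above are
 * exposed under the net.netlink sysctl tree, so they can be inspected or
 * tuned from userland (the values below are arbitrary):
 *
 *	# sysctl net.netlink.sendspace
 *	# sysctl net.netlink.nl_maxsockbuf=268435456
 */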
88 "Maximum Netlink socket buffer size"); 89 90 91 static unsigned int osd_slot_id = 0; 92 93 void 94 nl_osd_register(void) 95 { 96 osd_slot_id = osd_register(OSD_THREAD, NULL, NULL); 97 } 98 99 void 100 nl_osd_unregister(void) 101 { 102 osd_deregister(OSD_THREAD, osd_slot_id); 103 } 104 105 struct nlpcb * 106 _nl_get_thread_nlp(struct thread *td) 107 { 108 return (osd_get(OSD_THREAD, &td->td_osd, osd_slot_id)); 109 } 110 111 void 112 nl_set_thread_nlp(struct thread *td, struct nlpcb *nlp) 113 { 114 NLP_LOG(LOG_DEBUG2, nlp, "Set thread %p nlp to %p (slot %u)", td, nlp, osd_slot_id); 115 if (osd_set(OSD_THREAD, &td->td_osd, osd_slot_id, nlp) == 0) 116 return; 117 /* Failed, need to realloc */ 118 void **rsv = osd_reserve(osd_slot_id); 119 osd_set_reserved(OSD_THREAD, &td->td_osd, osd_slot_id, rsv, nlp); 120 } 121 122 /* 123 * Looks up a nlpcb struct based on the @portid. Need to claim nlsock_mtx. 124 * Returns nlpcb pointer if present else NULL 125 */ 126 static struct nlpcb * 127 nl_port_lookup(uint32_t port_id) 128 { 129 struct nlpcb *nlp; 130 131 CK_LIST_FOREACH(nlp, &V_nl_ctl->ctl_port_head, nl_port_next) { 132 if (nlp->nl_port == port_id) 133 return (nlp); 134 } 135 return (NULL); 136 } 137 138 static void 139 nl_add_group_locked(struct nlpcb *nlp, unsigned int group_id) 140 { 141 MPASS(group_id <= NLP_MAX_GROUPS); 142 --group_id; 143 144 /* TODO: add family handler callback */ 145 if (!nlp_unconstrained_vnet(nlp)) 146 return; 147 148 nlp->nl_groups[group_id / 64] |= (uint64_t)1 << (group_id % 64); 149 } 150 151 static void 152 nl_del_group_locked(struct nlpcb *nlp, unsigned int group_id) 153 { 154 MPASS(group_id <= NLP_MAX_GROUPS); 155 --group_id; 156 157 nlp->nl_groups[group_id / 64] &= ~((uint64_t)1 << (group_id % 64)); 158 } 159 160 static bool 161 nl_isset_group_locked(struct nlpcb *nlp, unsigned int group_id) 162 { 163 MPASS(group_id <= NLP_MAX_GROUPS); 164 --group_id; 165 166 return (nlp->nl_groups[group_id / 64] & ((uint64_t)1 << (group_id % 64))); 167 } 168 169 static uint32_t 170 nl_get_groups_compat(struct nlpcb *nlp) 171 { 172 uint32_t groups_mask = 0; 173 174 for (int i = 0; i < 32; i++) { 175 if (nl_isset_group_locked(nlp, i + 1)) 176 groups_mask |= (1 << i); 177 } 178 179 return (groups_mask); 180 } 181 182 static struct nl_buf * 183 nl_buf_copy(struct nl_buf *nb) 184 { 185 struct nl_buf *copy; 186 187 copy = nl_buf_alloc(nb->buflen, M_NOWAIT); 188 if (__predict_false(copy == NULL)) 189 return (NULL); 190 memcpy(copy, nb, sizeof(*nb) + nb->buflen); 191 192 return (copy); 193 } 194 195 /* 196 * Broadcasts in the writer's buffer. 197 */ 198 bool 199 nl_send_group(struct nl_writer *nw) 200 { 201 struct nl_buf *nb = nw->buf; 202 struct nlpcb *nlp_last = NULL; 203 struct nlpcb *nlp; 204 NLCTL_TRACKER; 205 206 IF_DEBUG_LEVEL(LOG_DEBUG2) { 207 struct nlmsghdr *hdr = (struct nlmsghdr *)nb->data; 208 NL_LOG(LOG_DEBUG2, "MCAST len %u msg type %d len %u to group %d/%d", 209 nb->datalen, hdr->nlmsg_type, hdr->nlmsg_len, 210 nw->group.proto, nw->group.id); 211 } 212 213 nw->buf = NULL; 214 215 struct nl_control *ctl = atomic_load_ptr(&V_nl_ctl); 216 if (__predict_false(ctl == NULL)) { 217 /* 218 * Can be the case when notification is sent within VNET 219 * which doesn't have any netlink sockets. 

static struct nl_buf *
nl_buf_copy(struct nl_buf *nb)
{
	struct nl_buf *copy;

	copy = nl_buf_alloc(nb->buflen, M_NOWAIT);
	if (__predict_false(copy == NULL))
		return (NULL);
	memcpy(copy, nb, sizeof(*nb) + nb->buflen);

	return (copy);
}

/*
 * Broadcasts the message in the writer's buffer to all members of the
 * writer's group.
 */
bool
nl_send_group(struct nl_writer *nw)
{
	struct nl_buf *nb = nw->buf;
	struct nlpcb *nlp_last = NULL;
	struct nlpcb *nlp;
	NLCTL_TRACKER;

	IF_DEBUG_LEVEL(LOG_DEBUG2) {
		struct nlmsghdr *hdr = (struct nlmsghdr *)nb->data;
		NL_LOG(LOG_DEBUG2, "MCAST len %u msg type %d len %u to group %d/%d",
		    nb->datalen, hdr->nlmsg_type, hdr->nlmsg_len,
		    nw->group.proto, nw->group.id);
	}

	nw->buf = NULL;

	struct nl_control *ctl = atomic_load_ptr(&V_nl_ctl);
	if (__predict_false(ctl == NULL)) {
		/*
		 * Can be the case when the notification is sent within a VNET
		 * which doesn't have any netlink sockets.
		 */
		nl_buf_free(nb);
		return (false);
	}

	NLCTL_RLOCK(ctl);

	CK_LIST_FOREACH(nlp, &ctl->ctl_pcb_head, nl_next) {
		if (nl_isset_group_locked(nlp, nw->group.id) &&
		    nlp->nl_proto == nw->group.proto) {
			if (nlp_last != NULL) {
				struct nl_buf *copy;

				copy = nl_buf_copy(nb);
				if (copy != NULL) {
					nw->buf = copy;
					(void)nl_send(nw, nlp_last);
				} else {
					NLP_LOCK(nlp_last);
					if (nlp_last->nl_socket != NULL)
						sorwakeup(nlp_last->nl_socket);
					NLP_UNLOCK(nlp_last);
				}
			}
			nlp_last = nlp;
		}
	}
	if (nlp_last != NULL) {
		nw->buf = nb;
		(void)nl_send(nw, nlp_last);
	} else
		nl_buf_free(nb);

	NLCTL_RUNLOCK(ctl);

	return (true);
}

bool
nl_has_listeners(int netlink_family, uint32_t groups_mask)
{
	return (V_nl_ctl != NULL);
}

static uint32_t
nl_find_port(void)
{
	/*
	 * An application can open multiple netlink sockets.
	 * Start with the current pid; if it is already taken,
	 * try random numbers in the 256k..256k+64k range,
	 * avoiding clashes with pids.
	 */
	if (nl_port_lookup(curproc->p_pid) == NULL)
		return (curproc->p_pid);
	for (int i = 0; i < 16; i++) {
		uint32_t nl_port = (arc4random() % 65536) + 65536 * 4;
		if (nl_port_lookup(nl_port) == NULL)
			return (nl_port);
		NL_LOG(LOG_DEBUG3, "tried %u\n", nl_port);
	}
	return (curproc->p_pid);
}

static int
nl_bind_locked(struct nlpcb *nlp, struct sockaddr_nl *snl)
{
	if (nlp->nl_bound) {
		if (nlp->nl_port != snl->nl_pid) {
			NL_LOG(LOG_DEBUG,
			    "bind() failed: program pid %d "
			    "is different from provided pid %d",
			    nlp->nl_port, snl->nl_pid);
			return (EINVAL); // XXX: better error
		}
	} else {
		if (snl->nl_pid == 0)
			snl->nl_pid = nl_find_port();
		if (nl_port_lookup(snl->nl_pid) != NULL)
			return (EADDRINUSE);
		nlp->nl_port = snl->nl_pid;
		nlp->nl_bound = true;
		CK_LIST_INSERT_HEAD(&V_nl_ctl->ctl_port_head, nlp, nl_port_next);
	}
	for (int i = 0; i < 32; i++) {
		if (snl->nl_groups & ((uint32_t)1 << i))
			nl_add_group_locked(nlp, i + 1);
		else
			nl_del_group_locked(nlp, i + 1);
	}

	return (0);
}
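
/*
 * Hedged userland counterpart of nl_bind_locked() (a sketch, not compiled
 * here): nl_pid = 0 asks the kernel to pick a port via nl_find_port(),
 * while bit i of the legacy nl_groups mask subscribes to group i + 1:
 *
 *	struct sockaddr_nl snl = {
 *		.nl_len = sizeof(snl),		// checked by nl_pru_bind()
 *		.nl_family = AF_NETLINK,
 *		.nl_pid = 0,			// 0: kernel chooses the port
 *		.nl_groups = 0x1,		// subscribe to group 1
 *	};
 *	bind(s, (struct sockaddr *)&snl, sizeof(snl));
 */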
"(linux) " : "", curproc->p_pid, 330 nl_get_proto_name(proto)); 331 332 /* Create per-VNET state on first socket init */ 333 struct nl_control *ctl = atomic_load_ptr(&V_nl_ctl); 334 if (ctl == NULL) 335 ctl = vnet_nl_ctl_init(); 336 KASSERT(V_nl_ctl != NULL, ("nl_attach: vnet_sock_init() failed")); 337 338 MPASS(sotonlpcb(so) == NULL); 339 340 nlp = malloc(sizeof(struct nlpcb), M_PCB, M_WAITOK | M_ZERO); 341 error = soreserve(so, nl_sendspace, nl_recvspace); 342 if (error != 0) { 343 free(nlp, M_PCB); 344 return (error); 345 } 346 TAILQ_INIT(&so->so_rcv.nl_queue); 347 TAILQ_INIT(&so->so_snd.nl_queue); 348 so->so_pcb = nlp; 349 nlp->nl_socket = so; 350 /* Copy so_cred to avoid having socket_var.h in every header */ 351 nlp->nl_cred = so->so_cred; 352 nlp->nl_proto = proto; 353 nlp->nl_process_id = curproc->p_pid; 354 nlp->nl_linux = is_linux; 355 nlp->nl_unconstrained_vnet = !jailed_without_vnet(so->so_cred); 356 nlp->nl_need_thread_setup = true; 357 NLP_LOCK_INIT(nlp); 358 refcount_init(&nlp->nl_refcount, 1); 359 360 nlp->nl_taskqueue = taskqueue_create("netlink_socket", M_WAITOK, 361 taskqueue_thread_enqueue, &nlp->nl_taskqueue); 362 TASK_INIT(&nlp->nl_task, 0, nl_taskqueue_handler, nlp); 363 taskqueue_start_threads(&nlp->nl_taskqueue, 1, PWAIT, 364 "netlink_socket (PID %u)", nlp->nl_process_id); 365 366 NLCTL_WLOCK(ctl); 367 /* XXX: check ctl is still alive */ 368 CK_LIST_INSERT_HEAD(&ctl->ctl_pcb_head, nlp, nl_next); 369 NLCTL_WUNLOCK(ctl); 370 371 soisconnected(so); 372 373 return (0); 374 } 375 376 static int 377 nl_pru_bind(struct socket *so, struct sockaddr *sa, struct thread *td) 378 { 379 struct nl_control *ctl = atomic_load_ptr(&V_nl_ctl); 380 struct nlpcb *nlp = sotonlpcb(so); 381 struct sockaddr_nl *snl = (struct sockaddr_nl *)sa; 382 int error; 383 384 NL_LOG(LOG_DEBUG3, "socket %p, PID %d", so, curproc->p_pid); 385 if (snl->nl_len != sizeof(*snl)) { 386 NL_LOG(LOG_DEBUG, "socket %p, wrong sizeof(), ignoring bind()", so); 387 return (EINVAL); 388 } 389 390 391 NLCTL_WLOCK(ctl); 392 NLP_LOCK(nlp); 393 error = nl_bind_locked(nlp, snl); 394 NLP_UNLOCK(nlp); 395 NLCTL_WUNLOCK(ctl); 396 NL_LOG(LOG_DEBUG2, "socket %p, bind() to %u, groups %u, error %d", so, 397 snl->nl_pid, snl->nl_groups, error); 398 399 return (error); 400 } 401 402 403 static int 404 nl_assign_port(struct nlpcb *nlp, uint32_t port_id) 405 { 406 struct nl_control *ctl = atomic_load_ptr(&V_nl_ctl); 407 struct sockaddr_nl snl = { 408 .nl_pid = port_id, 409 }; 410 int error; 411 412 NLCTL_WLOCK(ctl); 413 NLP_LOCK(nlp); 414 snl.nl_groups = nl_get_groups_compat(nlp); 415 error = nl_bind_locked(nlp, &snl); 416 NLP_UNLOCK(nlp); 417 NLCTL_WUNLOCK(ctl); 418 419 NL_LOG(LOG_DEBUG3, "socket %p, port assign: %d, error: %d", nlp->nl_socket, port_id, error); 420 return (error); 421 } 422 423 /* 424 * nl_autobind_port binds a unused portid to @nlp 425 * @nlp: pcb data for the netlink socket 426 * @candidate_id: first id to consider 427 */ 428 static int 429 nl_autobind_port(struct nlpcb *nlp, uint32_t candidate_id) 430 { 431 struct nl_control *ctl = atomic_load_ptr(&V_nl_ctl); 432 uint32_t port_id = candidate_id; 433 NLCTL_TRACKER; 434 bool exist; 435 int error = EADDRINUSE; 436 437 for (int i = 0; i < 10; i++) { 438 NL_LOG(LOG_DEBUG3, "socket %p, trying to assign port %d", nlp->nl_socket, port_id); 439 NLCTL_RLOCK(ctl); 440 exist = nl_port_lookup(port_id) != 0; 441 NLCTL_RUNLOCK(ctl); 442 if (!exist) { 443 error = nl_assign_port(nlp, port_id); 444 if (error != EADDRINUSE) 445 break; 446 } 447 port_id++; 448 } 449 
NL_LOG(LOG_DEBUG3, "socket %p, autobind to %d, error: %d", nlp->nl_socket, port_id, error); 450 return (error); 451 } 452 453 static int 454 nl_pru_connect(struct socket *so, struct sockaddr *sa, struct thread *td) 455 { 456 struct sockaddr_nl *snl = (struct sockaddr_nl *)sa; 457 struct nlpcb *nlp; 458 459 NL_LOG(LOG_DEBUG3, "socket %p, PID %d", so, curproc->p_pid); 460 if (snl->nl_len != sizeof(*snl)) { 461 NL_LOG(LOG_DEBUG, "socket %p, wrong sizeof(), ignoring bind()", so); 462 return (EINVAL); 463 } 464 465 nlp = sotonlpcb(so); 466 if (!nlp->nl_bound) { 467 int error = nl_autobind_port(nlp, td->td_proc->p_pid); 468 if (error != 0) { 469 NL_LOG(LOG_DEBUG, "socket %p, nl_autobind() failed: %d", so, error); 470 return (error); 471 } 472 } 473 /* XXX: Handle socket flags & multicast */ 474 soisconnected(so); 475 476 NL_LOG(LOG_DEBUG2, "socket %p, connect to %u", so, snl->nl_pid); 477 478 return (0); 479 } 480 481 static void 482 destroy_nlpcb_epoch(epoch_context_t ctx) 483 { 484 struct nlpcb *nlp; 485 486 nlp = __containerof(ctx, struct nlpcb, nl_epoch_ctx); 487 488 NLP_LOCK_DESTROY(nlp); 489 free(nlp, M_PCB); 490 } 491 492 static void 493 nl_close(struct socket *so) 494 { 495 struct nl_control *ctl = atomic_load_ptr(&V_nl_ctl); 496 MPASS(sotonlpcb(so) != NULL); 497 struct nlpcb *nlp; 498 struct nl_buf *nb; 499 500 NL_LOG(LOG_DEBUG2, "detaching socket %p, PID %d", so, curproc->p_pid); 501 nlp = sotonlpcb(so); 502 503 /* Mark as inactive so no new work can be enqueued */ 504 NLP_LOCK(nlp); 505 bool was_bound = nlp->nl_bound; 506 NLP_UNLOCK(nlp); 507 508 /* Wait till all scheduled work has been completed */ 509 taskqueue_drain_all(nlp->nl_taskqueue); 510 taskqueue_free(nlp->nl_taskqueue); 511 512 NLCTL_WLOCK(ctl); 513 NLP_LOCK(nlp); 514 if (was_bound) { 515 CK_LIST_REMOVE(nlp, nl_port_next); 516 NL_LOG(LOG_DEBUG3, "socket %p, unlinking bound pid %u", so, nlp->nl_port); 517 } 518 CK_LIST_REMOVE(nlp, nl_next); 519 nlp->nl_socket = NULL; 520 NLP_UNLOCK(nlp); 521 NLCTL_WUNLOCK(ctl); 522 523 so->so_pcb = NULL; 524 525 while ((nb = TAILQ_FIRST(&so->so_snd.nl_queue)) != NULL) { 526 TAILQ_REMOVE(&so->so_snd.nl_queue, nb, tailq); 527 nl_buf_free(nb); 528 } 529 while ((nb = TAILQ_FIRST(&so->so_rcv.nl_queue)) != NULL) { 530 TAILQ_REMOVE(&so->so_rcv.nl_queue, nb, tailq); 531 nl_buf_free(nb); 532 } 533 534 NL_LOG(LOG_DEBUG3, "socket %p, detached", so); 535 536 /* XXX: is delayed free needed? 

static int
nl_pru_disconnect(struct socket *so)
{
	NL_LOG(LOG_DEBUG3, "socket %p, PID %d", so, curproc->p_pid);
	MPASS(sotonlpcb(so) != NULL);
	return (ENOTCONN);
}

static int
nl_sockaddr(struct socket *so, struct sockaddr *sa)
{

	*(struct sockaddr_nl *)sa = (struct sockaddr_nl){
		/* TODO: set other fields */
		.nl_len = sizeof(struct sockaddr_nl),
		.nl_family = AF_NETLINK,
		.nl_pid = sotonlpcb(so)->nl_port,
	};

	return (0);
}

static int
nl_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *m, struct mbuf *control, int flags, struct thread *td)
{
	struct nlpcb *nlp = sotonlpcb(so);
	struct sockbuf *sb = &so->so_snd;
	struct nl_buf *nb;
	u_int len;
	int error;

	MPASS(m == NULL && uio != NULL);

	NL_LOG(LOG_DEBUG2, "sending message to kernel");

	if (__predict_false(control != NULL)) {
		m_freem(control);
		return (EINVAL);
	}

	if (__predict_false(flags & MSG_OOB))	/* XXXGL: or just ignore? */
		return (EOPNOTSUPP);

	if (__predict_false(uio->uio_resid < sizeof(struct nlmsghdr)))
		return (ENOBUFS);		/* XXXGL: any better error? */

	NL_LOG(LOG_DEBUG3, "sending message to kernel async processing");

	error = SOCK_IO_SEND_LOCK(so, SBLOCKWAIT(flags));
	if (error)
		return (error);

	len = roundup2(uio->uio_resid, 8) + SCRATCH_BUFFER_SIZE;
	if (nlp->nl_linux)
		len += roundup2(uio->uio_resid, 8);
	nb = nl_buf_alloc(len, M_WAITOK);
	nb->datalen = uio->uio_resid;
	error = uiomove(&nb->data[0], uio->uio_resid, uio);
	if (__predict_false(error))
		goto out;

	SOCK_SENDBUF_LOCK(so);
restart:
	if (sb->sb_hiwat - sb->sb_ccc >= nb->datalen) {
		TAILQ_INSERT_TAIL(&sb->nl_queue, nb, tailq);
		sb->sb_acc += nb->datalen;
		sb->sb_ccc += nb->datalen;
		NL_LOG(LOG_DEBUG3, "enqueue %u bytes", nb->datalen);
		nb = NULL;
	} else if ((so->so_state & SS_NBIO) ||
	    (flags & (MSG_NBIO | MSG_DONTWAIT)) != 0) {
		SOCK_SENDBUF_UNLOCK(so);
		error = EWOULDBLOCK;
		goto out;
	} else {
		if ((error = sbwait(so, SO_SND)) != 0) {
			SOCK_SENDBUF_UNLOCK(so);
			goto out;
		} else
			goto restart;
	}
	SOCK_SENDBUF_UNLOCK(so);

	if (nb == NULL) {
		/* The buffer was enqueued; kick the taskqueue. */
		NLP_LOCK(nlp);
		nl_schedule_taskqueue(nlp);
		NLP_UNLOCK(nlp);
	}

out:
	SOCK_IO_SEND_UNLOCK(so);
	if (nb != NULL)
		nl_buf_free(nb);
	return (error);
}
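
/*
 * Illustrative userland sketch (not compiled here) of the framing that
 * nl_sosend() expects: every write must start with a struct nlmsghdr and
 * be at least that large.  Whether the request is then accepted depends
 * on the per-protocol parser; a bare NETLINK_ROUTE dump request:
 *
 *	struct nlmsghdr hdr = {
 *		.nlmsg_len = sizeof(hdr),	// no payload in this sketch
 *		.nlmsg_type = RTM_GETLINK,
 *		.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP,
 *		.nlmsg_seq = 1,
 *	};
 *	send(s, &hdr, sizeof(hdr), 0);
 */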

/* Create control data for recvmsg(2) on Netlink socket. */
static struct mbuf *
nl_createcontrol(struct nlpcb *nlp)
{
	struct {
		struct nlattr nla;
		uint32_t val;
	} data[] = {
		{
			.nla.nla_len = sizeof(struct nlattr) + sizeof(uint32_t),
			.nla.nla_type = NLMSGINFO_ATTR_PROCESS_ID,
			.val = nlp->nl_process_id,
		},
		{
			.nla.nla_len = sizeof(struct nlattr) + sizeof(uint32_t),
			.nla.nla_type = NLMSGINFO_ATTR_PORT_ID,
			.val = nlp->nl_port,
		},
	};

	return (sbcreatecontrol(data, sizeof(data), NETLINK_MSG_INFO,
	    SOL_NETLINK, M_WAITOK));
}
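
/*
 * Hedged userland sketch of consuming the control data built above: once
 * enabled via setsockopt(s, SOL_NETLINK, NETLINK_MSG_INFO, &one, ...),
 * each recvmsg(2) carries a SOL_NETLINK/NETLINK_MSG_INFO cmsg whose
 * payload is the nlattr array from nl_createcontrol():
 *
 *	for (struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
 *	    cmsg = CMSG_NXTHDR(&msg, cmsg))
 *		if (cmsg->cmsg_level == SOL_NETLINK &&
 *		    cmsg->cmsg_type == NETLINK_MSG_INFO)
 *			parse_nlattrs(CMSG_DATA(cmsg));	// hypothetical helper
 */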

static int
nl_soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct mbuf **mp, struct mbuf **controlp, int *flagsp)
{
	static const struct sockaddr_nl nl_empty_src = {
		.nl_len = sizeof(struct sockaddr_nl),
		.nl_family = PF_NETLINK,
		.nl_pid = 0 /* comes from the kernel */
	};
	struct sockbuf *sb = &so->so_rcv;
	struct nlpcb *nlp = sotonlpcb(so);
	struct nl_buf *first, *last, *nb, *next;
	struct nlmsghdr *hdr;
	int flags, error;
	u_int len, overflow, partoff, partlen, msgrcv, datalen;
	bool nonblock, trunc, peek;

	MPASS(mp == NULL && uio != NULL);

	NL_LOG(LOG_DEBUG3, "socket %p, PID %d", so, curproc->p_pid);

	if (psa != NULL)
		*psa = sodupsockaddr((const struct sockaddr *)&nl_empty_src,
		    M_WAITOK);

	if (controlp != NULL && (nlp->nl_flags & NLF_MSG_INFO))
		*controlp = nl_createcontrol(nlp);

	flags = flagsp != NULL ? *flagsp & ~MSG_TRUNC : 0;
	trunc = flagsp != NULL ? *flagsp & MSG_TRUNC : false;
	nonblock = (so->so_state & SS_NBIO) ||
	    (flags & (MSG_DONTWAIT | MSG_NBIO));
	peek = flags & MSG_PEEK;

	error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags));
	if (__predict_false(error))
		return (error);

	len = 0;
	overflow = 0;
	msgrcv = 0;
	datalen = 0;

	SOCK_RECVBUF_LOCK(so);
	while ((first = TAILQ_FIRST(&sb->nl_queue)) == NULL) {
		if (nonblock) {
			SOCK_RECVBUF_UNLOCK(so);
			SOCK_IO_RECV_UNLOCK(so);
			return (EWOULDBLOCK);
		}
		error = sbwait(so, SO_RCV);
		if (error) {
			SOCK_RECVBUF_UNLOCK(so);
			SOCK_IO_RECV_UNLOCK(so);
			return (error);
		}
	}

	/*
	 * The Netlink socket buffer consists of a queue of nl_bufs, but for
	 * userland there should be no boundaries.  However, there are Netlink
	 * messages that shouldn't be split.  The internal invariant is that a
	 * message never spans two nl_bufs.
	 * If a large userland buffer is provided, we traverse the queue until
	 * either the queue end is reached or the buffer is filled.  If an
	 * application provides a buffer that can't fit a single message, we
	 * truncate it and lose its tail.  This is the only condition where we
	 * lose data.  If the buffer can fit at least one message, we return
	 * it and don't truncate the next one.
	 *
	 * We use the same code for the normal and MSG_PEEK cases.  On the
	 * first queue pass we scan the nl_bufs and count the length.  If we
	 * can copy the entire queue out in one read, everything is trivial.
	 * If we cannot, we save a pointer to the last (or partial) nl_buf
	 * and, in the !peek case, split the queue into two pieces.  We can
	 * safely drop the queue lock, as the kernel only appends nl_bufs to
	 * the end of the queue, and we are the exclusive owner of the queue
	 * beginning due to the sleepable lock.  On the second pass we copy
	 * data out and, in the !peek case, free the nl_bufs.
	 */
	TAILQ_FOREACH(nb, &sb->nl_queue, tailq) {
		u_int offset;

		MPASS(nb->offset < nb->datalen);
		offset = nb->offset;
		while (offset < nb->datalen) {
			hdr = (struct nlmsghdr *)&nb->data[offset];
			MPASS(nb->offset + hdr->nlmsg_len <= nb->datalen);
			if (uio->uio_resid < len + hdr->nlmsg_len) {
				overflow = len + hdr->nlmsg_len -
				    uio->uio_resid;
				partoff = nb->offset;
				if (offset > partoff) {
					partlen = offset - partoff;
					if (!peek) {
						nb->offset = offset;
						datalen += partlen;
					}
				} else if (len == 0 && uio->uio_resid > 0) {
					flags |= MSG_TRUNC;
					partlen = uio->uio_resid;
					if (peek)
						goto nospace;
					datalen += hdr->nlmsg_len;
					if (nb->offset + hdr->nlmsg_len ==
					    nb->datalen) {
						/*
						 * Avoid leaving empty nb.
						 * Process last nb normally.
						 * Trust uiomove() to care
						 * about negative uio_resid.
						 */
						nb = TAILQ_NEXT(nb, tailq);
						overflow = 0;
						partlen = 0;
					} else
						nb->offset += hdr->nlmsg_len;
					msgrcv++;
				} else
					partlen = 0;
				goto nospace;
			}
			len += hdr->nlmsg_len;
			offset += hdr->nlmsg_len;
			MPASS(offset <= nb->buflen);
			msgrcv++;
		}
		MPASS(offset == nb->datalen);
		datalen += nb->datalen - nb->offset;
	}
nospace:
	last = nb;
	if (!peek) {
		if (last == NULL)
			TAILQ_INIT(&sb->nl_queue);
		else {
			/* XXXGL: create TAILQ_SPLIT */
			TAILQ_FIRST(&sb->nl_queue) = last;
			last->tailq.tqe_prev = &TAILQ_FIRST(&sb->nl_queue);
		}
		MPASS(sb->sb_acc >= datalen);
		sb->sb_acc -= datalen;
		sb->sb_ccc -= datalen;
	}
	SOCK_RECVBUF_UNLOCK(so);

	for (nb = first; nb != last; nb = next) {
		next = TAILQ_NEXT(nb, tailq);
		if (__predict_true(error == 0))
			error = uiomove(&nb->data[nb->offset],
			    (int)(nb->datalen - nb->offset), uio);
		if (!peek)
			nl_buf_free(nb);
	}
	if (last != NULL && partlen > 0 && __predict_true(error == 0))
		error = uiomove(&nb->data[partoff], (int)partlen, uio);

	if (trunc && overflow > 0) {
		uio->uio_resid -= overflow;
		MPASS(uio->uio_resid < 0);
	} else
		MPASS(uio->uio_resid >= 0);

	if (uio->uio_td)
		uio->uio_td->td_ru.ru_msgrcv += msgrcv;

	if (flagsp != NULL)
		*flagsp |= flags;

	SOCK_IO_RECV_UNLOCK(so);

	nl_on_transmit(sotonlpcb(so));

	return (error);
}
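
/*
 * Hedged userland sketch of the truncation contract implemented above
 * (assuming the Linux-compatible MSG_TRUNC semantics this function
 * provides): peeking with MSG_TRUNC reports the full length of the next
 * message without consuming it, so a caller can avoid the lossy
 * single-message truncation path:
 *
 *	ssize_t want = recv(s, buf, bufsize, MSG_PEEK | MSG_TRUNC);
 *	// grow buf to 'want' bytes if needed, then read for real
 *	ssize_t got = recv(s, buf, bufsize, 0);
 */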
"set" : "get", 863 so, sopt->sopt_name); 864 865 switch (sopt->sopt_dir) { 866 case SOPT_SET: 867 switch (sopt->sopt_name) { 868 case NETLINK_ADD_MEMBERSHIP: 869 case NETLINK_DROP_MEMBERSHIP: 870 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); 871 if (error != 0) 872 break; 873 if (optval <= 0 || optval >= NLP_MAX_GROUPS) { 874 error = ERANGE; 875 break; 876 } 877 NL_LOG(LOG_DEBUG2, "ADD/DEL group %d", (uint32_t)optval); 878 879 NLCTL_WLOCK(ctl); 880 if (sopt->sopt_name == NETLINK_ADD_MEMBERSHIP) 881 nl_add_group_locked(nlp, optval); 882 else 883 nl_del_group_locked(nlp, optval); 884 NLCTL_WUNLOCK(ctl); 885 break; 886 case NETLINK_CAP_ACK: 887 case NETLINK_EXT_ACK: 888 case NETLINK_GET_STRICT_CHK: 889 case NETLINK_MSG_INFO: 890 error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval)); 891 if (error != 0) 892 break; 893 894 flag = nl_getoptflag(sopt->sopt_name); 895 896 if ((flag == NLF_MSG_INFO) && nlp->nl_linux) { 897 error = EINVAL; 898 break; 899 } 900 901 NLCTL_WLOCK(ctl); 902 if (optval != 0) 903 nlp->nl_flags |= flag; 904 else 905 nlp->nl_flags &= ~flag; 906 NLCTL_WUNLOCK(ctl); 907 break; 908 default: 909 error = ENOPROTOOPT; 910 } 911 break; 912 case SOPT_GET: 913 switch (sopt->sopt_name) { 914 case NETLINK_LIST_MEMBERSHIPS: 915 NLCTL_RLOCK(ctl); 916 optval = nl_get_groups_compat(nlp); 917 NLCTL_RUNLOCK(ctl); 918 error = sooptcopyout(sopt, &optval, sizeof(optval)); 919 break; 920 case NETLINK_CAP_ACK: 921 case NETLINK_EXT_ACK: 922 case NETLINK_GET_STRICT_CHK: 923 case NETLINK_MSG_INFO: 924 NLCTL_RLOCK(ctl); 925 optval = (nlp->nl_flags & nl_getoptflag(sopt->sopt_name)) != 0; 926 NLCTL_RUNLOCK(ctl); 927 error = sooptcopyout(sopt, &optval, sizeof(optval)); 928 break; 929 default: 930 error = ENOPROTOOPT; 931 } 932 break; 933 default: 934 error = ENOPROTOOPT; 935 } 936 937 return (error); 938 } 939 940 static int 941 sysctl_handle_nl_maxsockbuf(SYSCTL_HANDLER_ARGS) 942 { 943 int error = 0; 944 u_long tmp_maxsockbuf = nl_maxsockbuf; 945 946 error = sysctl_handle_long(oidp, &tmp_maxsockbuf, arg2, req); 947 if (error || !req->newptr) 948 return (error); 949 if (tmp_maxsockbuf < MSIZE + MCLBYTES) 950 return (EINVAL); 951 nl_maxsockbuf = tmp_maxsockbuf; 952 953 return (0); 954 } 955 956 static int 957 nl_setsbopt(struct socket *so, struct sockopt *sopt) 958 { 959 int error, optval; 960 bool result; 961 962 if (sopt->sopt_name != SO_RCVBUF) 963 return (sbsetopt(so, sopt)); 964 965 /* Allow to override max buffer size in certain conditions */ 966 967 error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval); 968 if (error != 0) 969 return (error); 970 NL_LOG(LOG_DEBUG2, "socket %p, PID %d, SO_RCVBUF=%d", so, curproc->p_pid, optval); 971 if (optval > sb_max_adj) { 972 if (priv_check(curthread, PRIV_NET_ROUTE) != 0) 973 return (EPERM); 974 } 975 976 SOCK_RECVBUF_LOCK(so); 977 result = sbreserve_locked_limit(so, SO_RCV, optval, nl_maxsockbuf, curthread); 978 SOCK_RECVBUF_UNLOCK(so); 979 980 return (result ? 

#define	NETLINK_PROTOSW						\
	.pr_flags = PR_ATOMIC | PR_ADDR | PR_SOCKBUF,		\
	.pr_ctloutput = nl_ctloutput,				\
	.pr_setsbopt = nl_setsbopt,				\
	.pr_attach = nl_pru_attach,				\
	.pr_bind = nl_pru_bind,					\
	.pr_connect = nl_pru_connect,				\
	.pr_disconnect = nl_pru_disconnect,			\
	.pr_sosend = nl_sosend,					\
	.pr_soreceive = nl_soreceive,				\
	.pr_sockaddr = nl_sockaddr,				\
	.pr_close = nl_close

static struct protosw netlink_raw_sw = {
	.pr_type = SOCK_RAW,
	NETLINK_PROTOSW
};

static struct protosw netlink_dgram_sw = {
	.pr_type = SOCK_DGRAM,
	NETLINK_PROTOSW
};

static struct domain netlinkdomain = {
	.dom_family = PF_NETLINK,
	.dom_name = "netlink",
	.dom_flags = DOMF_UNLOADABLE,
	.dom_nprotosw = 2,
	.dom_protosw = { &netlink_raw_sw, &netlink_dgram_sw },
};

DOMAIN_SET(netlink);
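
/*
 * Closing illustrative sketch (not compiled here): both protosw entries
 * share NETLINK_PROTOSW, so SOCK_RAW and SOCK_DGRAM netlink sockets
 * behave identically from userland:
 *
 *	int s = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	// or: socket(AF_NETLINK, SOCK_DGRAM, NETLINK_ROUTE);
 */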