/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Ng Peng Nam Sean
 * Copyright (c) 2022 Alexander V. Chernikov <melifaro@FreeBSD.org>
 * Copyright (c) 2023 Gleb Smirnoff <glebius@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This file contains socket and protocol bindings for netlink.
 */

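/*
 * Illustration only, not part of the kernel build: a minimal userland
 * sketch of a consumer of these bindings.  It opens a netlink socket,
 * binds it, and optionally joins multicast groups via the legacy
 * 32-bit group mask; an nl_pid of zero asks the kernel to pick an
 * unused port id:
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	struct sockaddr_nl snl = {
 *		.nl_len = sizeof(snl),
 *		.nl_family = AF_NETLINK,
 *		.nl_pid = 0,
 *		.nl_groups = 0,
 *	};
 *	bind(fd, (struct sockaddr *)&snl, sizeof(snl));
 */
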
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
#include <sys/domain.h>
#include <sys/jail.h>
#include <sys/mbuf.h>
#include <sys/osd.h>
#include <sys/protosw.h>
#include <sys/proc.h>
#include <sys/ck.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysent.h>
#include <sys/syslog.h>
#include <sys/priv.h>
#include <sys/uio.h>

#include <netlink/netlink.h>
#include <netlink/netlink_ctl.h>
#include <netlink/netlink_var.h>

#define	DEBUG_MOD_NAME	nl_domain
#define	DEBUG_MAX_LEVEL	LOG_DEBUG3
#include <netlink/netlink_debug.h>
_DECLARE_DEBUG(LOG_INFO);

_Static_assert((NLP_MAX_GROUPS % 64) == 0,
    "NLP_MAX_GROUPS has to be multiple of 64");
_Static_assert(NLP_MAX_GROUPS >= 64,
    "NLP_MAX_GROUPS has to be at least 64");

#define	NLCTL_TRACKER		struct rm_priotracker nl_tracker
#define	NLCTL_RLOCK(_ctl)	rm_rlock(&((_ctl)->ctl_lock), &nl_tracker)
#define	NLCTL_RUNLOCK(_ctl)	rm_runlock(&((_ctl)->ctl_lock), &nl_tracker)

#define	NLCTL_WLOCK(_ctl)	rm_wlock(&((_ctl)->ctl_lock))
#define	NLCTL_WUNLOCK(_ctl)	rm_wunlock(&((_ctl)->ctl_lock))

static u_long nl_sendspace = NLSNDQ;
SYSCTL_ULONG(_net_netlink, OID_AUTO, sendspace, CTLFLAG_RW, &nl_sendspace, 0,
    "Default netlink socket send space");

static u_long nl_recvspace = NLSNDQ;
SYSCTL_ULONG(_net_netlink, OID_AUTO, recvspace, CTLFLAG_RW, &nl_recvspace, 0,
    "Default netlink socket receive space");

extern u_long sb_max_adj;
static u_long nl_maxsockbuf = 512 * 1024 * 1024; /* 512M, XXX: init based on physmem */
static int sysctl_handle_nl_maxsockbuf(SYSCTL_HANDLER_ARGS);
SYSCTL_OID(_net_netlink, OID_AUTO, nl_maxsockbuf,
    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, &nl_maxsockbuf, 0,
    sysctl_handle_nl_maxsockbuf, "LU",
    "Maximum Netlink socket buffer size");

static unsigned int osd_slot_id = 0;

void
nl_osd_register(void)
{
	osd_slot_id = osd_register(OSD_THREAD, NULL, NULL);
}

void
nl_osd_unregister(void)
{
	osd_deregister(OSD_THREAD, osd_slot_id);
}

struct nlpcb *
_nl_get_thread_nlp(struct thread *td)
{
	return (osd_get(OSD_THREAD, &td->td_osd, osd_slot_id));
}

void
nl_set_thread_nlp(struct thread *td, struct nlpcb *nlp)
{
	NLP_LOG(LOG_DEBUG2, nlp, "Set thread %p nlp to %p (slot %u)", td, nlp, osd_slot_id);
	if (osd_set(OSD_THREAD, &td->td_osd, osd_slot_id, nlp) == 0)
		return;
	/* Failed, need to realloc */
	void **rsv = osd_reserve(osd_slot_id);
	osd_set_reserved(OSD_THREAD, &td->td_osd, osd_slot_id, rsv, nlp);
}

/*
 * Looks up the nlpcb struct keyed by @port_id.  The caller must claim
 * nlsock_mtx.  Returns the nlpcb pointer if present, else NULL.
 */
static struct nlpcb *
nl_port_lookup(uint32_t port_id)
{
	struct nlpcb *nlp;

	CK_LIST_FOREACH(nlp, &V_nl_ctl->ctl_port_head, nl_port_next) {
		if (nlp->nl_port == port_id)
			return (nlp);
	}
	return (NULL);
}

static void
nl_add_group_locked(struct nlpcb *nlp, unsigned int group_id)
{
	MPASS(group_id < NLP_MAX_GROUPS);

	/* TODO: add family handler callback */
	if (!nlp_unconstrained_vnet(nlp))
		return;

	BIT_SET(NLP_MAX_GROUPS, group_id, &nlp->nl_groups);
}

static void
nl_del_group_locked(struct nlpcb *nlp, unsigned int group_id)
{
	MPASS(group_id < NLP_MAX_GROUPS);

	BIT_CLR(NLP_MAX_GROUPS, group_id, &nlp->nl_groups);
}

static bool
nl_isset_group_locked(struct nlpcb *nlp, unsigned int group_id)
{
	MPASS(group_id < NLP_MAX_GROUPS);

	return (BIT_ISSET(NLP_MAX_GROUPS, group_id, &nlp->nl_groups));
}

static uint32_t
nl_get_groups_compat(struct nlpcb *nlp)
{
	uint32_t groups_mask = 0;

	for (int i = 0; i < 32; i++) {
		if (nl_isset_group_locked(nlp, i + 1))
			groups_mask |= (1 << i);
	}

	return (groups_mask);
}

static struct nl_buf *
nl_buf_copy(struct nl_buf *nb)
{
	struct nl_buf *copy;

	copy = nl_buf_alloc(nb->buflen, M_NOWAIT);
	if (__predict_false(copy == NULL))
		return (NULL);
	memcpy(copy, nb, sizeof(*nb) + nb->buflen);

	return (copy);
}

/*
 * Broadcasts the message in the writer's buffer to all sockets
 * subscribed to the writer's destination group.
 */
bool
nl_send_group(struct nl_writer *nw)
{
	struct nl_buf *nb = nw->buf;
	struct nlpcb *nlp_last = NULL;
	struct nlpcb *nlp;
	NLCTL_TRACKER;

	IF_DEBUG_LEVEL(LOG_DEBUG2) {
		struct nlmsghdr *hdr = (struct nlmsghdr *)nb->data;
		NL_LOG(LOG_DEBUG2, "MCAST len %u msg type %d len %u to group %d/%d",
		    nb->datalen, hdr->nlmsg_type, hdr->nlmsg_len,
		    nw->group.proto, nw->group.id);
	}

	nw->buf = NULL;

	struct nl_control *ctl = atomic_load_ptr(&V_nl_ctl);
	if (__predict_false(ctl == NULL)) {
		/*
		 * This can happen when a notification is sent within a
		 * VNET that doesn't have any netlink sockets.
		 */
		nl_buf_free(nb);
		return (false);
	}

	NLCTL_RLOCK(ctl);

	CK_LIST_FOREACH(nlp, &ctl->ctl_pcb_head, nl_next) {
		if ((nw->group.priv == 0 || priv_check_cred(
		    nlp->nl_socket->so_cred, nw->group.priv) == 0) &&
		    nlp->nl_proto == nw->group.proto &&
		    nl_isset_group_locked(nlp, nw->group.id)) {
			if (nlp_last != NULL) {
				struct nl_buf *copy;

				copy = nl_buf_copy(nb);
				if (copy != NULL) {
					nw->buf = copy;
					(void)nl_send(nw, nlp_last);
				} else {
					NLP_LOCK(nlp_last);
					if (nlp_last->nl_socket != NULL)
						sorwakeup(nlp_last->nl_socket);
					NLP_UNLOCK(nlp_last);
				}
			}
			nlp_last = nlp;
		}
	}
	if (nlp_last != NULL) {
		nw->buf = nb;
		(void)nl_send(nw, nlp_last);
	} else
		nl_buf_free(nb);

	NLCTL_RUNLOCK(ctl);

	return (true);
}

bool
nl_has_listeners(uint16_t netlink_family, uint32_t groups_mask)
{
	return (V_nl_ctl != NULL);
}

static uint32_t
nl_find_port(void)
{
	/*
	 * An app can open multiple netlink sockets.  Start with the
	 * current pid; if it is already taken, try random numbers in the
	 * 65k..256k+65k space, avoiding clashes with pids.
	 */
	if (nl_port_lookup(curproc->p_pid) == NULL)
		return (curproc->p_pid);
	for (int i = 0; i < 16; i++) {
		uint32_t nl_port = (arc4random() % 65536) + 65536 * 4;
		if (nl_port_lookup(nl_port) == NULL)
			return (nl_port);
		NL_LOG(LOG_DEBUG3, "tried %u\n", nl_port);
	}
	return (curproc->p_pid);
}

static int
nl_bind_locked(struct nlpcb *nlp, struct sockaddr_nl *snl)
{
	if (nlp->nl_bound) {
		if (nlp->nl_port != snl->nl_pid) {
			NL_LOG(LOG_DEBUG,
			    "bind() failed: program pid %d "
			    "is different from provided pid %d",
			    nlp->nl_port, snl->nl_pid);
			return (EINVAL); /* XXX: better error */
		}
	} else {
		if (snl->nl_pid == 0)
			snl->nl_pid = nl_find_port();
		if (nl_port_lookup(snl->nl_pid) != NULL)
			return (EADDRINUSE);
		nlp->nl_port = snl->nl_pid;
		nlp->nl_bound = true;
		CK_LIST_INSERT_HEAD(&V_nl_ctl->ctl_port_head, nlp, nl_port_next);
	}
	for (int i = 0; i < 32; i++) {
		if (snl->nl_groups & ((uint32_t)1 << i))
			nl_add_group_locked(nlp, i + 1);
		else
			nl_del_group_locked(nlp, i + 1);
	}

	return (0);
}
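
/*
 * A worked example of the legacy group mask handled above (a sketch,
 * not kernel code): bit i of the 32-bit nl_groups mask passed to
 * bind(2) selects multicast group id i + 1, so a mask of 0x5
 * subscribes the socket to groups 1 and 3:
 *
 *	snl.nl_groups = (1 << 0) | (1 << 2);	-- groups {1, 3}
 */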

static int
nl_pru_attach(struct socket *so, int proto, struct thread *td)
{
	struct nlpcb *nlp;
	int error;

	if (__predict_false(netlink_unloading != 0))
		return (EAFNOSUPPORT);

	error = nl_verify_proto(proto);
	if (error != 0)
		return (error);

	bool is_linux = SV_PROC_ABI(td->td_proc) == SV_ABI_LINUX;
	NL_LOG(LOG_DEBUG2, "socket %p, %sPID %d: attaching socket to %s",
	    so, is_linux ? "(linux) " : "", curproc->p_pid,
	    nl_get_proto_name(proto));

	/* Create per-VNET state on first socket init */
	struct nl_control *ctl = atomic_load_ptr(&V_nl_ctl);
	if (ctl == NULL)
		ctl = vnet_nl_ctl_init();
	KASSERT(V_nl_ctl != NULL, ("nl_attach: vnet_nl_ctl_init() failed"));

	MPASS(sotonlpcb(so) == NULL);

	nlp = malloc(sizeof(struct nlpcb), M_PCB, M_WAITOK | M_ZERO);
	error = soreserve(so, nl_sendspace, nl_recvspace);
	if (error != 0) {
		free(nlp, M_PCB);
		return (error);
	}
	TAILQ_INIT(&so->so_rcv.nl_queue);
	TAILQ_INIT(&so->so_snd.nl_queue);
	so->so_pcb = nlp;
	nlp->nl_socket = so;
	/* Copy so_cred to avoid having socket_var.h in every header */
	nlp->nl_cred = so->so_cred;
	nlp->nl_proto = proto;
	nlp->nl_process_id = curproc->p_pid;
	nlp->nl_linux = is_linux;
	nlp->nl_unconstrained_vnet = !jailed_without_vnet(so->so_cred);
	nlp->nl_need_thread_setup = true;
	NLP_LOCK_INIT(nlp);
	refcount_init(&nlp->nl_refcount, 1);

	nlp->nl_taskqueue = taskqueue_create("netlink_socket", M_WAITOK,
	    taskqueue_thread_enqueue, &nlp->nl_taskqueue);
	TASK_INIT(&nlp->nl_task, 0, nl_taskqueue_handler, nlp);
	taskqueue_start_threads(&nlp->nl_taskqueue, 1, PWAIT,
	    "netlink_socket (PID %u)", nlp->nl_process_id);

	NLCTL_WLOCK(ctl);
	/* XXX: check ctl is still alive */
	CK_LIST_INSERT_HEAD(&ctl->ctl_pcb_head, nlp, nl_next);
	NLCTL_WUNLOCK(ctl);

	soisconnected(so);

	return (0);
}

static int
nl_pru_bind(struct socket *so, struct sockaddr *sa, struct thread *td)
{
	struct nl_control *ctl = atomic_load_ptr(&V_nl_ctl);
	struct nlpcb *nlp = sotonlpcb(so);
	struct sockaddr_nl *snl = (struct sockaddr_nl *)sa;
	int error;

	NL_LOG(LOG_DEBUG3, "socket %p, PID %d", so, curproc->p_pid);
	if (snl->nl_len != sizeof(*snl)) {
		NL_LOG(LOG_DEBUG, "socket %p, wrong sizeof(), ignoring bind()", so);
		return (EINVAL);
	}

	NLCTL_WLOCK(ctl);
	NLP_LOCK(nlp);
	error = nl_bind_locked(nlp, snl);
	NLP_UNLOCK(nlp);
	NLCTL_WUNLOCK(ctl);
	NL_LOG(LOG_DEBUG2, "socket %p, bind() to %u, groups %u, error %d", so,
	    snl->nl_pid, snl->nl_groups, error);

	return (error);
}

static int
nl_assign_port(struct nlpcb *nlp, uint32_t port_id)
{
	struct nl_control *ctl = atomic_load_ptr(&V_nl_ctl);
	struct sockaddr_nl snl = {
		.nl_pid = port_id,
	};
	int error;

	NLCTL_WLOCK(ctl);
	NLP_LOCK(nlp);
	snl.nl_groups = nl_get_groups_compat(nlp);
	error = nl_bind_locked(nlp, &snl);
	NLP_UNLOCK(nlp);
	NLCTL_WUNLOCK(ctl);

	NL_LOG(LOG_DEBUG3, "socket %p, port assign: %d, error: %d", nlp->nl_socket, port_id, error);
	return (error);
}

/*
 * nl_autobind_port binds an unused port id to @nlp.
 * @nlp: pcb data for the netlink socket
 * @candidate_id: first id to consider
 */
static int
nl_autobind_port(struct nlpcb *nlp, uint32_t candidate_id)
{
	struct nl_control *ctl = atomic_load_ptr(&V_nl_ctl);
	uint32_t port_id = candidate_id;
	NLCTL_TRACKER;
	bool exist;
	int error = EADDRINUSE;

	for (int i = 0; i < 10; i++) {
		NL_LOG(LOG_DEBUG3, "socket %p, trying to assign port %d", nlp->nl_socket, port_id);
		NLCTL_RLOCK(ctl);
		exist = nl_port_lookup(port_id) != NULL;
		NLCTL_RUNLOCK(ctl);
		if (!exist) {
			error = nl_assign_port(nlp, port_id);
			if (error != EADDRINUSE)
				break;
		}
		port_id++;
	}
	NL_LOG(LOG_DEBUG3, "socket %p, autobind to %d, error: %d", nlp->nl_socket, port_id, error);
	return (error);
}

static int
nl_pru_connect(struct socket *so, struct sockaddr *sa, struct thread *td)
{
	struct sockaddr_nl *snl = (struct sockaddr_nl *)sa;
	struct nlpcb *nlp;

	NL_LOG(LOG_DEBUG3, "socket %p, PID %d", so, curproc->p_pid);
	if (snl->nl_len != sizeof(*snl)) {
		NL_LOG(LOG_DEBUG, "socket %p, wrong sizeof(), ignoring connect()", so);
		return (EINVAL);
	}

	nlp = sotonlpcb(so);
	if (!nlp->nl_bound) {
		int error = nl_autobind_port(nlp, td->td_proc->p_pid);
		if (error != 0) {
			NL_LOG(LOG_DEBUG, "socket %p, nl_autobind() failed: %d", so, error);
			return (error);
		}
	}
	/* XXX: Handle socket flags & multicast */
	soisconnected(so);

	NL_LOG(LOG_DEBUG2, "socket %p, connect to %u", so, snl->nl_pid);

	return (0);
}

static void
destroy_nlpcb_epoch(epoch_context_t ctx)
{
	struct nlpcb *nlp;

	nlp = __containerof(ctx, struct nlpcb, nl_epoch_ctx);

	NLP_LOCK_DESTROY(nlp);
	free(nlp, M_PCB);
}

static void
nl_close(struct socket *so)
{
	struct nl_control *ctl = atomic_load_ptr(&V_nl_ctl);
	MPASS(sotonlpcb(so) != NULL);
	struct nlpcb *nlp;
	struct nl_buf *nb;

	NL_LOG(LOG_DEBUG2, "detaching socket %p, PID %d", so, curproc->p_pid);
	nlp = sotonlpcb(so);

	/* Mark as inactive so no new work can be enqueued */
	NLP_LOCK(nlp);
	bool was_bound = nlp->nl_bound;
	NLP_UNLOCK(nlp);

	/* Wait till all scheduled work has been completed */
	taskqueue_drain_all(nlp->nl_taskqueue);
	taskqueue_free(nlp->nl_taskqueue);

	NLCTL_WLOCK(ctl);
	NLP_LOCK(nlp);
	if (was_bound) {
		CK_LIST_REMOVE(nlp, nl_port_next);
		NL_LOG(LOG_DEBUG3, "socket %p, unlinking bound pid %u", so, nlp->nl_port);
	}
	CK_LIST_REMOVE(nlp, nl_next);
	nlp->nl_socket = NULL;
	NLP_UNLOCK(nlp);
	NLCTL_WUNLOCK(ctl);

	so->so_pcb = NULL;

	while ((nb = TAILQ_FIRST(&so->so_snd.nl_queue)) != NULL) {
		TAILQ_REMOVE(&so->so_snd.nl_queue, nb, tailq);
		nl_buf_free(nb);
	}
	while ((nb = TAILQ_FIRST(&so->so_rcv.nl_queue)) != NULL) {
		TAILQ_REMOVE(&so->so_rcv.nl_queue, nb, tailq);
		nl_buf_free(nb);
	}

	NL_LOG(LOG_DEBUG3, "socket %p, detached", so);

	/* XXX: is delayed free needed? */
	NET_EPOCH_CALL(destroy_nlpcb_epoch, &nlp->nl_epoch_ctx);
}

static int
nl_pru_disconnect(struct socket *so)
{
	NL_LOG(LOG_DEBUG3, "socket %p, PID %d", so, curproc->p_pid);
	MPASS(sotonlpcb(so) != NULL);
	return (ENOTCONN);
}

static int
nl_sockaddr(struct socket *so, struct sockaddr *sa)
{

	*(struct sockaddr_nl *)sa = (struct sockaddr_nl){
		/* TODO: set other fields */
		.nl_len = sizeof(struct sockaddr_nl),
		.nl_family = AF_NETLINK,
		.nl_pid = sotonlpcb(so)->nl_port,
	};

	return (0);
}

static int
nl_sosend(struct socket *so, struct sockaddr *addr, struct uio *uio,
    struct mbuf *m, struct mbuf *control, int flags, struct thread *td)
{
	struct nlpcb *nlp = sotonlpcb(so);
	struct sockbuf *sb = &so->so_snd;
	struct nl_buf *nb;
	size_t len;
	int error;

	MPASS(m == NULL && uio != NULL);

	NL_LOG(LOG_DEBUG2, "sending message to kernel");

	if (__predict_false(control != NULL)) {
		m_freem(control);
		return (EINVAL);
	}

	if (__predict_false(flags & MSG_OOB))	/* XXXGL: or just ignore? */
		return (EOPNOTSUPP);

	if (__predict_false(uio->uio_resid < sizeof(struct nlmsghdr)))
		return (ENOBUFS);	/* XXXGL: any better error? */

	NL_LOG(LOG_DEBUG3, "sending message to kernel async processing");

	error = SOCK_IO_SEND_LOCK(so, SBLOCKWAIT(flags));
	if (error)
		return (error);

	len = roundup2(uio->uio_resid, 8) + SCRATCH_BUFFER_SIZE;
	if (nlp->nl_linux)
		len += roundup2(uio->uio_resid, 8);
	nb = nl_buf_alloc(len, M_WAITOK);
	nb->datalen = uio->uio_resid;
	error = uiomove(&nb->data[0], uio->uio_resid, uio);
	if (__predict_false(error))
		goto out;

	SOCK_SENDBUF_LOCK(so);
restart:
	if (sb->sb_hiwat - sb->sb_ccc >= nb->datalen) {
		TAILQ_INSERT_TAIL(&sb->nl_queue, nb, tailq);
		sb->sb_acc += nb->datalen;
		sb->sb_ccc += nb->datalen;
		nb = NULL;
	} else if ((so->so_state & SS_NBIO) ||
	    (flags & (MSG_NBIO | MSG_DONTWAIT)) != 0) {
		SOCK_SENDBUF_UNLOCK(so);
		error = EWOULDBLOCK;
		goto out;
	} else {
		if ((error = sbwait(so, SO_SND)) != 0) {
			SOCK_SENDBUF_UNLOCK(so);
			goto out;
		} else
			goto restart;
	}
	SOCK_SENDBUF_UNLOCK(so);

	if (nb == NULL) {
		/* The buffer was enqueued; kick the taskqueue. */
		NL_LOG(LOG_DEBUG3, "enqueued message for async processing");
		NLP_LOCK(nlp);
		nl_schedule_taskqueue(nlp);
		NLP_UNLOCK(nlp);
	}

out:
	SOCK_IO_SEND_UNLOCK(so);
	if (nb != NULL)
		nl_buf_free(nb);
	return (error);
}

/* Create control data for recvmsg(2) on Netlink socket. */
static struct mbuf *
nl_createcontrol(struct nlpcb *nlp)
{
	struct {
		struct nlattr nla;
		uint32_t val;
	} data[] = {
		{
			.nla.nla_len = sizeof(struct nlattr) + sizeof(uint32_t),
			.nla.nla_type = NLMSGINFO_ATTR_PROCESS_ID,
			.val = nlp->nl_process_id,
		},
		{
			.nla.nla_len = sizeof(struct nlattr) + sizeof(uint32_t),
			.nla.nla_type = NLMSGINFO_ATTR_PORT_ID,
			.val = nlp->nl_port,
		},
	};

	return (sbcreatecontrol(data, sizeof(data), NETLINK_MSG_INFO,
	    SOL_NETLINK, M_WAITOK));
}
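
/*
 * Usage sketch (userland, illustration only): enabling NETLINK_MSG_INFO
 * and picking the control data up from recvmsg(2) on a msghdr with
 * msg_control set up.  parse_nlattrs() is a hypothetical helper
 * standing in for the caller's attribute parser:
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_NETLINK, NETLINK_MSG_INFO, &one, sizeof(one));
 *	recvmsg(fd, &msg, 0);
 *	for (struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
 *	    cmsg = CMSG_NXTHDR(&msg, cmsg))
 *		if (cmsg->cmsg_level == SOL_NETLINK &&
 *		    cmsg->cmsg_type == NETLINK_MSG_INFO)
 *			parse_nlattrs(CMSG_DATA(cmsg));
 */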

static int
nl_soreceive(struct socket *so, struct sockaddr **psa, struct uio *uio,
    struct mbuf **mp, struct mbuf **controlp, int *flagsp)
{
	static const struct sockaddr_nl nl_empty_src = {
		.nl_len = sizeof(struct sockaddr_nl),
		.nl_family = PF_NETLINK,
		.nl_pid = 0 /* comes from the kernel */
	};
	struct sockbuf *sb = &so->so_rcv;
	struct nlpcb *nlp = sotonlpcb(so);
	struct nl_buf *first, *last, *nb, *next;
	struct nlmsghdr *hdr;
	int flags, error;
	u_int len, overflow, partoff, partlen, msgrcv, datalen;
	bool nonblock, trunc, peek;

	MPASS(mp == NULL && uio != NULL);

	NL_LOG(LOG_DEBUG3, "socket %p, PID %d", so, curproc->p_pid);

	if (psa != NULL)
		*psa = sodupsockaddr((const struct sockaddr *)&nl_empty_src,
		    M_WAITOK);

	if (controlp != NULL && (nlp->nl_flags & NLF_MSG_INFO))
		*controlp = nl_createcontrol(nlp);

	flags = flagsp != NULL ? *flagsp & ~MSG_TRUNC : 0;
	trunc = flagsp != NULL ? *flagsp & MSG_TRUNC : false;
	nonblock = (so->so_state & SS_NBIO) ||
	    (flags & (MSG_DONTWAIT | MSG_NBIO));
	peek = flags & MSG_PEEK;

	error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags));
	if (__predict_false(error))
		return (error);

	len = 0;
	overflow = 0;
	msgrcv = 0;
	datalen = 0;

	SOCK_RECVBUF_LOCK(so);
	while ((first = TAILQ_FIRST(&sb->nl_queue)) == NULL) {
		if (nonblock) {
			SOCK_RECVBUF_UNLOCK(so);
			SOCK_IO_RECV_UNLOCK(so);
			return (EWOULDBLOCK);
		}
		error = sbwait(so, SO_RCV);
		if (error) {
			SOCK_RECVBUF_UNLOCK(so);
			SOCK_IO_RECV_UNLOCK(so);
			return (error);
		}
	}

	/*
	 * The netlink socket buffer consists of a queue of nl_bufs, but
	 * to userland it should appear as a contiguous stream with no
	 * boundaries.  However, Netlink messages shouldn't be split, and
	 * the internal invariant is that a message never spans two
	 * nl_bufs.
	 * If a large userland buffer is provided, we traverse the queue
	 * until either the queue end is reached or the buffer is filled.
	 * If an application provides a buffer that can't fit a single
	 * message, we truncate the message and lose its tail.  This is
	 * the only condition where we lose data.  If the buffer fits at
	 * least one message, we return it whole and don't truncate the
	 * next one.
	 *
	 * We use the same code for the normal and the MSG_PEEK case.  On
	 * the first queue pass we scan the nl_bufs and count the length.
	 * If we can copy everything out in one pass, the case is trivial.
	 * If we can not, we save a pointer to the last (or partial)
	 * nl_buf and, in the !peek case, split the queue into two pieces.
	 * We can safely drop the queue lock, as the kernel only appends
	 * nl_bufs to the end of the queue, and we are the exclusive owner
	 * of the queue beginning due to the sleepable lock.  On the
	 * second pass we copy the data out and, in the !peek case, free
	 * the nl_bufs.
	 */
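	/*
	 * Illustrative example of the above (hypothetical sizes): assume
	 * the queue holds three nl_bufs carrying 100, 200 and 300 bytes
	 * of messages and the user buffer is 250 bytes.  The first pass
	 * stops inside the second nl_buf; "last" points at it, and only
	 * the messages of it that fit are accounted as the partial piece.
	 * In the !peek case the first nl_buf is unlinked and later freed;
	 * the second pass copies out 100 bytes plus the fitting part of
	 * the second nl_buf, and the remainder stays queued for the next
	 * read.
	 */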
	TAILQ_FOREACH(nb, &sb->nl_queue, tailq) {
		u_int offset;

		MPASS(nb->offset < nb->datalen);
		offset = nb->offset;
		while (offset < nb->datalen) {
			hdr = (struct nlmsghdr *)&nb->data[offset];
			MPASS(nb->offset + hdr->nlmsg_len <= nb->datalen);
			if (uio->uio_resid < len + hdr->nlmsg_len) {
				overflow = len + hdr->nlmsg_len -
				    uio->uio_resid;
				partoff = nb->offset;
				if (offset > partoff) {
					partlen = offset - partoff;
					if (!peek) {
						nb->offset = offset;
						datalen += partlen;
					}
				} else if (len == 0 && uio->uio_resid > 0) {
					flags |= MSG_TRUNC;
					partlen = uio->uio_resid;
					if (peek)
						goto nospace;
					datalen += hdr->nlmsg_len;
					if (nb->offset + hdr->nlmsg_len ==
					    nb->datalen) {
						/*
						 * Avoid leaving empty nb.
						 * Process last nb normally.
						 * Trust uiomove() to care
						 * about negative uio_resid.
						 */
						nb = TAILQ_NEXT(nb, tailq);
						overflow = 0;
						partlen = 0;
					} else
						nb->offset += hdr->nlmsg_len;
					msgrcv++;
				} else
					partlen = 0;
				goto nospace;
			}
			len += hdr->nlmsg_len;
			offset += hdr->nlmsg_len;
			MPASS(offset <= nb->buflen);
			msgrcv++;
		}
		MPASS(offset == nb->datalen);
		datalen += nb->datalen - nb->offset;
	}
nospace:
	last = nb;
	if (!peek) {
		if (last == NULL)
			TAILQ_INIT(&sb->nl_queue);
		else {
			/* XXXGL: create TAILQ_SPLIT */
			TAILQ_FIRST(&sb->nl_queue) = last;
			last->tailq.tqe_prev = &TAILQ_FIRST(&sb->nl_queue);
		}
		MPASS(sb->sb_acc >= datalen);
		sb->sb_acc -= datalen;
		sb->sb_ccc -= datalen;
	}
	SOCK_RECVBUF_UNLOCK(so);

	for (nb = first; nb != last; nb = next) {
		next = TAILQ_NEXT(nb, tailq);
		if (__predict_true(error == 0))
			error = uiomove(&nb->data[nb->offset],
			    (int)(nb->datalen - nb->offset), uio);
		if (!peek)
			nl_buf_free(nb);
	}
	if (last != NULL && partlen > 0 && __predict_true(error == 0))
		error = uiomove(&nb->data[partoff], (int)partlen, uio);

	if (trunc && overflow > 0) {
		uio->uio_resid -= overflow;
		MPASS(uio->uio_resid < 0);
	} else
		MPASS(uio->uio_resid >= 0);

	if (uio->uio_td)
		uio->uio_td->td_ru.ru_msgrcv += msgrcv;

	if (flagsp != NULL)
		*flagsp |= flags;

	SOCK_IO_RECV_UNLOCK(so);

	nl_on_transmit(sotonlpcb(so));

	return (error);
}

static int
nl_getoptflag(int sopt_name)
{
	switch (sopt_name) {
	case NETLINK_CAP_ACK:
		return (NLF_CAP_ACK);
	case NETLINK_EXT_ACK:
		return (NLF_EXT_ACK);
	case NETLINK_GET_STRICT_CHK:
		return (NLF_STRICT);
	case NETLINK_MSG_INFO:
		return (NLF_MSG_INFO);
	}

	return (0);
}

static int
nl_ctloutput(struct socket *so, struct sockopt *sopt)
{
	struct nl_control *ctl = atomic_load_ptr(&V_nl_ctl);
	struct nlpcb *nlp = sotonlpcb(so);
	uint32_t flag;
	int optval, error = 0;
	NLCTL_TRACKER;

	NL_LOG(LOG_DEBUG2, "%ssockopt(%p, %d)", (sopt->sopt_dir) ? "set" : "get",
	    so, sopt->sopt_name);

	switch (sopt->sopt_dir) {
	case SOPT_SET:
		switch (sopt->sopt_name) {
		case NETLINK_ADD_MEMBERSHIP:
		case NETLINK_DROP_MEMBERSHIP:
			error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
			if (error != 0)
				break;
			if (optval <= 0 || optval >= NLP_MAX_GROUPS) {
				error = ERANGE;
				break;
			}
			NL_LOG(LOG_DEBUG2, "ADD/DEL group %d", (uint32_t)optval);

			NLCTL_WLOCK(ctl);
			if (sopt->sopt_name == NETLINK_ADD_MEMBERSHIP)
				nl_add_group_locked(nlp, optval);
			else
				nl_del_group_locked(nlp, optval);
			NLCTL_WUNLOCK(ctl);
			break;
		case NETLINK_CAP_ACK:
		case NETLINK_EXT_ACK:
		case NETLINK_GET_STRICT_CHK:
		case NETLINK_MSG_INFO:
			error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
			if (error != 0)
				break;

			flag = nl_getoptflag(sopt->sopt_name);

			if ((flag == NLF_MSG_INFO) && nlp->nl_linux) {
				error = EINVAL;
				break;
			}

			NLCTL_WLOCK(ctl);
			if (optval != 0)
				nlp->nl_flags |= flag;
			else
				nlp->nl_flags &= ~flag;
			NLCTL_WUNLOCK(ctl);
			break;
		default:
			error = ENOPROTOOPT;
		}
		break;
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case NETLINK_LIST_MEMBERSHIPS:
			NLCTL_RLOCK(ctl);
			optval = nl_get_groups_compat(nlp);
			NLCTL_RUNLOCK(ctl);
			error = sooptcopyout(sopt, &optval, sizeof(optval));
			break;
		case NETLINK_CAP_ACK:
		case NETLINK_EXT_ACK:
		case NETLINK_GET_STRICT_CHK:
		case NETLINK_MSG_INFO:
			NLCTL_RLOCK(ctl);
			optval = (nlp->nl_flags & nl_getoptflag(sopt->sopt_name)) != 0;
			NLCTL_RUNLOCK(ctl);
			error = sooptcopyout(sopt, &optval, sizeof(optval));
			break;
		default:
			error = ENOPROTOOPT;
		}
		break;
	default:
		error = ENOPROTOOPT;
	}

	return (error);
}

static int
sysctl_handle_nl_maxsockbuf(SYSCTL_HANDLER_ARGS)
{
	int error = 0;
	u_long tmp_maxsockbuf = nl_maxsockbuf;

	error = sysctl_handle_long(oidp, &tmp_maxsockbuf, arg2, req);
	if (error || !req->newptr)
		return (error);
	if (tmp_maxsockbuf < MSIZE + MCLBYTES)
		return (EINVAL);
	nl_maxsockbuf = tmp_maxsockbuf;

	return (0);
}

static int
nl_setsbopt(struct socket *so, struct sockopt *sopt)
{
	int error, optval;
	bool result;

	if (sopt->sopt_name != SO_RCVBUF)
		return (sbsetopt(so, sopt));

	/* Allow overriding the max buffer size under certain conditions. */

	error = sooptcopyin(sopt, &optval, sizeof optval, sizeof optval);
	if (error != 0)
		return (error);
	NL_LOG(LOG_DEBUG2, "socket %p, PID %d, SO_RCVBUF=%d", so, curproc->p_pid, optval);
	if (optval > sb_max_adj) {
		if (priv_check(curthread, PRIV_NET_ROUTE) != 0)
			return (EPERM);
	}

	SOCK_RECVBUF_LOCK(so);
	result = sbreserve_locked_limit(so, SO_RCV, optval, nl_maxsockbuf, curthread);
	SOCK_RECVBUF_UNLOCK(so);

	return (result ? 0 : ENOBUFS);
}
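
/*
 * Usage sketch (userland, illustration only): growing the receive
 * buffer beyond the global limit derived from kern.ipc.maxsockbuf,
 * which per the check above requires PRIV_NET_ROUTE:
 *
 *	int sz = 64 * 1024 * 1024;
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &sz, sizeof(sz));
 */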

#define	NETLINK_PROTOSW						\
	.pr_flags = PR_ATOMIC | PR_ADDR | PR_SOCKBUF,		\
	.pr_ctloutput = nl_ctloutput,				\
	.pr_setsbopt = nl_setsbopt,				\
	.pr_attach = nl_pru_attach,				\
	.pr_bind = nl_pru_bind,					\
	.pr_connect = nl_pru_connect,				\
	.pr_disconnect = nl_pru_disconnect,			\
	.pr_sosend = nl_sosend,					\
	.pr_soreceive = nl_soreceive,				\
	.pr_sockaddr = nl_sockaddr,				\
	.pr_close = nl_close

static struct protosw netlink_raw_sw = {
	.pr_type = SOCK_RAW,
	NETLINK_PROTOSW
};

static struct protosw netlink_dgram_sw = {
	.pr_type = SOCK_DGRAM,
	NETLINK_PROTOSW
};

static struct domain netlinkdomain = {
	.dom_family = PF_NETLINK,
	.dom_name = "netlink",
	.dom_flags = DOMF_UNLOADABLE,
	.dom_nprotosw = 2,
	.dom_protosw = { &netlink_raw_sw, &netlink_dgram_sw },
};

DOMAIN_SET(netlink);