/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Ng Peng Nam Sean
 * Copyright (c) 2022 Alexander V. Chernikov <melifaro@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/ck.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/syslog.h>

#include <netlink/netlink.h>
#include <netlink/netlink_ctl.h>
#include <netlink/netlink_linux.h>
#include <netlink/netlink_var.h>

#define	DEBUG_MOD_NAME	nl_io
#define	DEBUG_MAX_LEVEL	LOG_DEBUG3
#include <netlink/netlink_debug.h>
_DECLARE_DEBUG(LOG_INFO);

/*
 * The logic below provides a p2p interface for receiving and
 * sending netlink data between the kernel and userland.
 */

static bool nl_process_nbuf(struct nl_buf *nb, struct nlpcb *nlp);

struct nl_buf *
nl_buf_alloc(size_t len, int mflag)
{
	struct nl_buf *nb;

	KASSERT(len > 0 && len <= UINT_MAX, ("%s: invalid length %zu",
	    __func__, len));

	nb = malloc(sizeof(struct nl_buf) + len, M_NETLINK, mflag);
	if (__predict_true(nb != NULL)) {
		nb->buflen = len;
		nb->datalen = nb->offset = 0;
	}

	return (nb);
}

void
nl_buf_free(struct nl_buf *nb)
{

	free(nb, M_NETLINK);
}

void
nl_schedule_taskqueue(struct nlpcb *nlp)
{
	if (!nlp->nl_task_pending) {
		nlp->nl_task_pending = true;
		taskqueue_enqueue(nlp->nl_taskqueue, &nlp->nl_task);
		NL_LOG(LOG_DEBUG3, "taskqueue scheduled");
	} else {
		NL_LOG(LOG_DEBUG3, "taskqueue schedule skipped");
	}
}

static bool
nl_process_received_one(struct nlpcb *nlp)
{
	struct socket *so = nlp->nl_socket;
	struct sockbuf *sb;
	struct nl_buf *nb;
	bool reschedule = false;

	NLP_LOCK(nlp);
	nlp->nl_task_pending = false;
	NLP_UNLOCK(nlp);

	/*
	 * Do not process queued up requests if there is no space to queue
	 * replies.
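	 * In that case the request buffers stay on the send queue and
	 * nl_on_transmit() reschedules the taskqueue once userland has read
	 * some data from the socket.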
	 */
	sb = &so->so_rcv;
	SOCK_RECVBUF_LOCK(so);
	if (sb->sb_hiwat <= sb->sb_ccc) {
		SOCK_RECVBUF_UNLOCK(so);
		return (false);
	}
	SOCK_RECVBUF_UNLOCK(so);

	sb = &so->so_snd;
	SOCK_SENDBUF_LOCK(so);
	while ((nb = TAILQ_FIRST(&sb->nl_queue)) != NULL) {
		TAILQ_REMOVE(&sb->nl_queue, nb, tailq);
		SOCK_SENDBUF_UNLOCK(so);
		reschedule = nl_process_nbuf(nb, nlp);
		SOCK_SENDBUF_LOCK(so);
		if (reschedule) {
			sb->sb_acc -= nb->datalen;
			sb->sb_ccc -= nb->datalen;
			/* XXXGL: potentially can reduce lock&unlock count. */
			sowwakeup_locked(so);
			nl_buf_free(nb);
			SOCK_SENDBUF_LOCK(so);
		} else {
			TAILQ_INSERT_HEAD(&sb->nl_queue, nb, tailq);
			break;
		}
	}
	SOCK_SENDBUF_UNLOCK(so);

	return (reschedule);
}

static void
nl_process_received(struct nlpcb *nlp)
{
	NL_LOG(LOG_DEBUG3, "taskqueue called");

	if (__predict_false(nlp->nl_need_thread_setup)) {
		nl_set_thread_nlp(curthread, nlp);
		NLP_LOCK(nlp);
		nlp->nl_need_thread_setup = false;
		NLP_UNLOCK(nlp);
	}

	while (nl_process_received_one(nlp))
		;
}

/*
 * Called after some data have been read from the socket.
 */
void
nl_on_transmit(struct nlpcb *nlp)
{
	NLP_LOCK(nlp);

	struct socket *so = nlp->nl_socket;
	if (__predict_false(nlp->nl_dropped_bytes > 0 && so != NULL)) {
		unsigned long dropped_bytes = nlp->nl_dropped_bytes;
		unsigned long dropped_messages = nlp->nl_dropped_messages;
		nlp->nl_dropped_bytes = 0;
		nlp->nl_dropped_messages = 0;

		struct sockbuf *sb = &so->so_rcv;
		NLP_LOG(LOG_DEBUG, nlp,
		    "socket RX overflowed, %lu messages (%lu bytes) dropped. "
		    "bytes: [%u/%u]", dropped_messages, dropped_bytes,
		    sb->sb_ccc, sb->sb_hiwat);
		/* TODO: send netlink message */
	}

	nl_schedule_taskqueue(nlp);
	NLP_UNLOCK(nlp);
}

void
nl_taskqueue_handler(void *_arg, int pending)
{
	struct nlpcb *nlp = (struct nlpcb *)_arg;

	CURVNET_SET(nlp->nl_socket->so_vnet);
	nl_process_received(nlp);
	CURVNET_RESTORE();
}

/*
 * Tries to send current data buffer from writer.
 *
 * Returns true on success.
 * If no queue overruns happened, wakes up socket owner.
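 * On receive buffer overflow the buffer is dropped, the per-socket dropped
 * message/byte counters are bumped, and nl_on_transmit() reports the loss
 * once userland reads from the socket.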
 */
bool
nl_send(struct nl_writer *nw, struct nlpcb *nlp)
{
	struct socket *so = nlp->nl_socket;
	struct sockbuf *sb = &so->so_rcv;
	struct nl_buf *nb;

	MPASS(nw->hdr == NULL);
	MPASS(nw->buf != NULL);
	MPASS(nw->buf->datalen > 0);

	IF_DEBUG_LEVEL(LOG_DEBUG2) {
		struct nlmsghdr *hdr = (struct nlmsghdr *)nw->buf->data;
		NLP_LOG(LOG_DEBUG2, nlp,
		    "TX len %u msgs %u msg type %d first hdrlen %u",
		    nw->buf->datalen, nw->num_messages, hdr->nlmsg_type,
		    hdr->nlmsg_len);
	}

	if (nlp->nl_linux && linux_netlink_p != NULL &&
	    __predict_false(!linux_netlink_p->msgs_to_linux(nw, nlp))) {
		nl_buf_free(nw->buf);
		nw->buf = NULL;
		return (false);
	}

	nb = nw->buf;
	nw->buf = NULL;

	SOCK_RECVBUF_LOCK(so);
	if (!nw->ignore_limit && __predict_false(sb->sb_hiwat <= sb->sb_ccc)) {
		SOCK_RECVBUF_UNLOCK(so);
		NLP_LOCK(nlp);
		nlp->nl_dropped_bytes += nb->datalen;
		nlp->nl_dropped_messages += nw->num_messages;
		NLP_LOG(LOG_DEBUG2, nlp, "RX overflow: %lu m (+%d), %lu b (+%d)",
		    (unsigned long)nlp->nl_dropped_messages, nw->num_messages,
		    (unsigned long)nlp->nl_dropped_bytes, nb->datalen);
		NLP_UNLOCK(nlp);
		nl_buf_free(nb);
		return (false);
	} else {
		bool full;

		TAILQ_INSERT_TAIL(&sb->nl_queue, nb, tailq);
		sb->sb_acc += nb->datalen;
		sb->sb_ccc += nb->datalen;
		full = sb->sb_hiwat <= sb->sb_ccc;
		sorwakeup_locked(so);
		if (full) {
			NLP_LOCK(nlp);
			nlp->nl_tx_blocked = true;
			NLP_UNLOCK(nlp);
		}
		return (true);
	}
}

static int
nl_receive_message(struct nlmsghdr *hdr, int remaining_length,
    struct nlpcb *nlp, struct nl_pstate *npt)
{
	nl_handler_f handler = nl_handlers[nlp->nl_proto].cb;
	int error = 0;

	NLP_LOG(LOG_DEBUG2, nlp, "msg len: %u type: %d flags: 0x%X seq: %u pid: %u",
	    hdr->nlmsg_len, hdr->nlmsg_type, hdr->nlmsg_flags, hdr->nlmsg_seq,
	    hdr->nlmsg_pid);

	if (__predict_false(hdr->nlmsg_len > remaining_length)) {
		NLP_LOG(LOG_DEBUG, nlp, "message is not entirely present: want %d got %d",
		    hdr->nlmsg_len, remaining_length);
		return (EINVAL);
	} else if (__predict_false(hdr->nlmsg_len < sizeof(*hdr))) {
		NL_LOG(LOG_DEBUG, "message too short: %d", hdr->nlmsg_len);
		return (EINVAL);
	}
	/* Stamp each message with sender pid */
	hdr->nlmsg_pid = nlp->nl_port;

	npt->hdr = hdr;

	if (hdr->nlmsg_flags & NLM_F_REQUEST &&
	    hdr->nlmsg_type >= NLMSG_MIN_TYPE) {
		NL_LOG(LOG_DEBUG2, "handling message with msg type: %d",
		    hdr->nlmsg_type);
		if (nlp->nl_linux) {
			MPASS(linux_netlink_p != NULL);
			error = linux_netlink_p->msg_from_linux(nlp->nl_proto,
			    &hdr, npt);
			if (error)
				goto ack;
		}
		error = handler(hdr, npt);
		NL_LOG(LOG_DEBUG2, "retcode: %d", error);
	}
ack:
	if ((hdr->nlmsg_flags & NLM_F_ACK) || (error != 0 && error != EINTR)) {
		if (!npt->nw->suppress_ack) {
			NL_LOG(LOG_DEBUG3, "ack");
			nlmsg_ack(nlp, error, hdr, npt);
		}
	}

	return (0);
}

static void
npt_clear(struct nl_pstate *npt)
{
	lb_clear(&npt->lb);
	npt->error = 0;
	npt->err_msg = NULL;
	npt->err_off = 0;
	npt->hdr = NULL;
	npt->nw->suppress_ack = false;
}

/*
 * Processes an incoming packet, which can contain multiple netlink messages.
 */
static bool
nl_process_nbuf(struct nl_buf *nb, struct nlpcb *nlp)
{
	struct nl_writer nw;
	struct nlmsghdr *hdr;
	int error;

	NL_LOG(LOG_DEBUG3, "RX netlink buf %p on %p", nb, nlp->nl_socket);

	if (!nl_writer_unicast(&nw, NLMSG_SMALL, nlp, false)) {
		NL_LOG(LOG_DEBUG, "error allocating socket writer");
		return (true);
	}

	nlmsg_ignore_limit(&nw);

	struct nl_pstate npt = {
		.nlp = nlp,
		.lb.base = &nb->data[roundup2(nb->datalen, 8)],
		.lb.size = nb->buflen - roundup2(nb->datalen, 8),
		.nw = &nw,
		.strict = nlp->nl_flags & NLF_STRICT,
	};

	for (; nb->offset + sizeof(struct nlmsghdr) <= nb->datalen;) {
		hdr = (struct nlmsghdr *)&nb->data[nb->offset];
		/* Save length prior to calling handler */
		int msglen = NLMSG_ALIGN(hdr->nlmsg_len);
		NL_LOG(LOG_DEBUG3, "parsing offset %d/%d",
		    nb->offset, nb->datalen);
		npt_clear(&npt);
		error = nl_receive_message(hdr, nb->datalen - nb->offset, nlp,
		    &npt);
		nb->offset += msglen;
		if (__predict_false(error != 0 || nlp->nl_tx_blocked))
			break;
	}
	NL_LOG(LOG_DEBUG3, "packet parsing done");
	nlmsg_flush(&nw);

	if (nlp->nl_tx_blocked) {
		NLP_LOCK(nlp);
		nlp->nl_tx_blocked = false;
		NLP_UNLOCK(nlp);
		return (false);
	} else
		return (true);
}