/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Vincenzo Maffione <vmaffione@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <net/if.h>
#include <net/netmap.h>
#include <net/netmap_virt.h>
#define NETMAP_WITH_LIBS
#include <net/netmap_user.h>

#include <assert.h>

#include "debug.h"
#include "iov.h"
#include "mevent.h"
#include "net_backends.h"
#include "net_backends_priv.h"

/* The virtio-net features supported by netmap. */
#define NETMAP_FEATURES (VIRTIO_NET_F_CSUM | VIRTIO_NET_F_HOST_TSO4 | \
		VIRTIO_NET_F_HOST_TSO6 | VIRTIO_NET_F_HOST_UFO | \
		VIRTIO_NET_F_GUEST_CSUM | VIRTIO_NET_F_GUEST_TSO4 | \
		VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_UFO)

struct netmap_priv {
	char ifname[IFNAMSIZ];	/* Port name passed to nm_open(). */
	struct nm_desc *nmd;	/* Descriptor for the open netmap port. */
	uint16_t memid;		/* Id of the netmap memory region. */
	struct netmap_ring *rx;	/* RX ring used by this backend (ring 0). */
	struct netmap_ring *tx;	/* TX ring used by this backend (ring 0). */
	struct mevent *mevp;	/* Read event for RX notifications. */
	net_be_rxeof_t cb;	/* RX callback and its opaque argument, */
	void *cb_param;		/* as passed to netmap_init(). */
};

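/* Fill in a request for the legacy netmap control API (struct nmreq). */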
static void
nmreq_init(struct nmreq *req, char *ifname)
{

	memset(req, 0, sizeof(*req));
	strlcpy(req->nr_name, ifname, sizeof(req->nr_name));
	req->nr_version = NETMAP_API;
}

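/*
 * Set the length of the virtio-net header used by this port, issuing
 * the NETMAP_BDG_VNET_HDR command of the legacy control API.
 */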
static int
netmap_set_vnet_hdr_len(struct net_backend *be, int vnet_hdr_len)
{
	int err;
	struct nmreq req;
	struct netmap_priv *priv = NET_BE_PRIV(be);

	nmreq_init(&req, priv->ifname);
	req.nr_cmd = NETMAP_BDG_VNET_HDR;
	req.nr_arg1 = vnet_hdr_len;
	err = ioctl(be->fd, NIOCREGIF, &req);
	if (err) {
		EPRINTLN("Unable to set vnet header length %d", vnet_hdr_len);
		return (err);
	}

	be->be_vnet_hdr_len = vnet_hdr_len;

	return (0);
}

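/*
 * Check whether the port accepts a given virtio-net header length by
 * trying to set it, restoring the previous value on success.
 */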
static int
netmap_has_vnet_hdr_len(struct net_backend *be, unsigned vnet_hdr_len)
{
	unsigned prev_hdr_len = be->be_vnet_hdr_len;
	int ret;

	if (vnet_hdr_len == prev_hdr_len) {
		return (1);
	}

	ret = netmap_set_vnet_hdr_len(be, vnet_hdr_len);
	if (ret) {
		return (0);
	}

	netmap_set_vnet_hdr_len(be, prev_hdr_len);

	return (1);
}

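/*
 * Advertise the offload features only if the port can handle the
 * virtio-net header they rely on.
 */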
static uint64_t
netmap_get_cap(struct net_backend *be)
{

	return (netmap_has_vnet_hdr_len(be, VNET_HDR_LEN) ?
	    NETMAP_FEATURES : 0);
}

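/*
 * The negotiated features are implied by the virtio-net header length,
 * so only the latter needs to be applied here.
 */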
static int
netmap_set_cap(struct net_backend *be, uint64_t features __unused,
    unsigned vnet_hdr_len)
{

	return (netmap_set_vnet_hdr_len(be, vnet_hdr_len));
}

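/*
 * Open the netmap port and register an (initially disabled) read event
 * for the RX path.  NETMAP_NO_TX_POLL is passed because transmissions
 * are flushed explicitly with NIOCTXSYNC in netmap_send().
 */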
static int
netmap_init(struct net_backend *be, const char *devname,
    nvlist_t *nvl __unused, net_be_rxeof_t cb, void *param)
{
	struct netmap_priv *priv = NET_BE_PRIV(be);

	strlcpy(priv->ifname, devname, sizeof(priv->ifname));
	priv->ifname[sizeof(priv->ifname) - 1] = '\0';

	priv->nmd = nm_open(priv->ifname, NULL, NETMAP_NO_TX_POLL, NULL);
	if (priv->nmd == NULL) {
		EPRINTLN("Unable to nm_open(): interface '%s', errno (%s)",
		    devname, strerror(errno));
		return (-1);
	}

	priv->memid = priv->nmd->req.nr_arg2;
	priv->tx = NETMAP_TXRING(priv->nmd->nifp, 0);
	priv->rx = NETMAP_RXRING(priv->nmd->nifp, 0);
	priv->cb = cb;
	priv->cb_param = param;
	be->fd = priv->nmd->fd;

	priv->mevp = mevent_add_disabled(be->fd, EVF_READ, cb, param);
	if (priv->mevp == NULL) {
		EPRINTLN("Could not register event");
		return (-1);
	}

	return (0);
}

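/* Tear down the RX event handler and close the netmap port. */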
static void
netmap_cleanup(struct net_backend *be)
{
	struct netmap_priv *priv = NET_BE_PRIV(be);

	if (priv->mevp) {
		mevent_delete(priv->mevp);
	}
	if (priv->nmd) {
		nm_close(priv->nmd);
	}
	be->fd = -1;
}

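/*
 * Copy the packet described by the guest iovec into the TX ring,
 * chaining slots with NS_MOREFRAG whenever a fragment does not fit in
 * a single netmap buffer.  The packet is dropped if the ring is full.
 */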
static ssize_t
netmap_send(struct net_backend *be, const struct iovec *iov,
	    int iovcnt)
{
	struct netmap_priv *priv = NET_BE_PRIV(be);
	struct netmap_ring *ring;
	ssize_t totlen = 0;
	int nm_buf_size;
	int nm_buf_len;
	uint32_t head;
	uint8_t *nm_buf;
	int j;

	ring = priv->tx;
	head = ring->head;
	if (head == ring->tail) {
		EPRINTLN("No space, drop %zu bytes", count_iov(iov, iovcnt));
		goto txsync;
	}
	nm_buf = NETMAP_BUF(ring, ring->slot[head].buf_idx);
	nm_buf_size = ring->nr_buf_size;
	nm_buf_len = 0;

	for (j = 0; j < iovcnt; j++) {
		uint8_t *iov_frag_buf = iov[j].iov_base;
		int iov_frag_size = iov[j].iov_len;

		totlen += iov_frag_size;

		/*
		 * Split each iovec fragment across multiple netmap
		 * slots, if necessary.
		 */
		for (;;) {
			int copylen;

			copylen = iov_frag_size < nm_buf_size ?
			    iov_frag_size : nm_buf_size;
			memcpy(nm_buf, iov_frag_buf, copylen);

			iov_frag_buf += copylen;
			iov_frag_size -= copylen;
			nm_buf += copylen;
			nm_buf_size -= copylen;
			nm_buf_len += copylen;

			if (iov_frag_size == 0) {
				break;
			}

			ring->slot[head].len = nm_buf_len;
			ring->slot[head].flags = NS_MOREFRAG;
			head = nm_ring_next(ring, head);
			if (head == ring->tail) {
				/*
				 * We ran out of netmap slots while
				 * splitting the iovec fragments.
				 */
				EPRINTLN("No space, drop %zu bytes",
				    count_iov(iov, iovcnt));
				goto txsync;
			}
			nm_buf = NETMAP_BUF(ring, ring->slot[head].buf_idx);
			nm_buf_size = ring->nr_buf_size;
			nm_buf_len = 0;
		}
	}

	/* Complete the last slot, which must not have NS_MOREFRAG set. */
	ring->slot[head].len = nm_buf_len;
	ring->slot[head].flags = 0;
	head = nm_ring_next(ring, head);

	/* Now update ring->head and ring->cur. */
	ring->head = ring->cur = head;
txsync:
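	/*
	 * Tell the kernel to process the TX ring; this is also reached
	 * on the drop paths, where it can reclaim completed slots.
	 */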
	ioctl(be->fd, NIOCTXSYNC, NULL);

	return (totlen);
}

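/*
 * Return the length of the next packet in the RX ring without
 * consuming it, accumulating slot lengths across an NS_MOREFRAG chain.
 * Returns 0 if the ring is empty.
 */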
static ssize_t
netmap_peek_recvlen(struct net_backend *be)
{
	struct netmap_priv *priv = NET_BE_PRIV(be);
	struct netmap_ring *ring = priv->rx;
	uint32_t head = ring->head;
	ssize_t totlen = 0;

	while (head != ring->tail) {
		struct netmap_slot *slot = ring->slot + head;

		totlen += slot->len;
		if ((slot->flags & NS_MOREFRAG) == 0)
			break;
		head = nm_ring_next(ring, head);
	}

	return (totlen);
}

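/*
 * Copy the next packet from the RX ring into the guest iovec,
 * reassembling NS_MOREFRAG chains.  Returns 0 if no packet is
 * available and -ENOSPC if the iovec is too short.
 */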
static ssize_t
netmap_recv(struct net_backend *be, const struct iovec *iov, int iovcnt)
{
	struct netmap_priv *priv = NET_BE_PRIV(be);
	struct netmap_slot *slot = NULL;
	struct netmap_ring *ring;
	uint8_t *iov_frag_buf;
	int iov_frag_size;
	ssize_t totlen = 0;
	uint32_t head;

	assert(iovcnt);

	ring = priv->rx;
	head = ring->head;
	iov_frag_buf = iov->iov_base;
	iov_frag_size = iov->iov_len;

	do {
		uint8_t *nm_buf;
		int nm_buf_len;

		if (head == ring->tail) {
			return (0);
		}

		slot = ring->slot + head;
		nm_buf = NETMAP_BUF(ring, slot->buf_idx);
		nm_buf_len = slot->len;

		for (;;) {
			int copylen = nm_buf_len < iov_frag_size ?
			    nm_buf_len : iov_frag_size;

			memcpy(iov_frag_buf, nm_buf, copylen);
			nm_buf += copylen;
			nm_buf_len -= copylen;
			iov_frag_buf += copylen;
			iov_frag_size -= copylen;
			totlen += copylen;

			if (nm_buf_len == 0) {
				break;
			}

			iov++;
			iovcnt--;
			if (iovcnt == 0) {
				/* No space to receive. */
				EPRINTLN("Short iov, drop %zd bytes",
				    totlen);
				return (-ENOSPC);
			}
			iov_frag_buf = iov->iov_base;
			iov_frag_size = iov->iov_len;
		}

		head = nm_ring_next(ring, head);

	} while (slot->flags & NS_MOREFRAG);

	/* Release slots to netmap. */
	ring->head = ring->cur = head;

	return (totlen);
}

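/* Enable the RX read event, so the rxeof callback starts firing. */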
static void
netmap_recv_enable(struct net_backend *be)
{
	struct netmap_priv *priv = NET_BE_PRIV(be);

	mevent_enable(priv->mevp);
}

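/* Disable the RX read event, pausing the rxeof callback. */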
static void
netmap_recv_disable(struct net_backend *be)
{
	struct netmap_priv *priv = NET_BE_PRIV(be);

	mevent_disable(priv->mevp);
}

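/*
 * Backend selection in bhyve is prefix-based, so this table serves
 * device names of the form "netmap:<ifname>".
 */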
static struct net_backend netmap_backend = {
	.prefix = "netmap",
	.priv_size = sizeof(struct netmap_priv),
	.init = netmap_init,
	.cleanup = netmap_cleanup,
	.send = netmap_send,
	.peek_recvlen = netmap_peek_recvlen,
	.recv = netmap_recv,
	.recv_enable = netmap_recv_enable,
	.recv_disable = netmap_recv_disable,
	.get_cap = netmap_get_cap,
	.set_cap = netmap_set_cap,
};

/* A clone of the netmap backend, with a different prefix. */
static struct net_backend vale_backend = {
	.prefix = "vale",
	.priv_size = sizeof(struct netmap_priv),
	.init = netmap_init,
	.cleanup = netmap_cleanup,
	.send = netmap_send,
	.peek_recvlen = netmap_peek_recvlen,
	.recv = netmap_recv,
	.recv_enable = netmap_recv_enable,
	.recv_disable = netmap_recv_disable,
	.get_cap = netmap_get_cap,
	.set_cap = netmap_set_cap,
};

DATA_SET(net_backend_set, netmap_backend);
DATA_SET(net_backend_set, vale_backend);
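
/*
 * Example usage: a guest NIC can be attached to these backends with
 * bhyve options such as "-s 5,virtio-net,netmap:em0" or
 * "-s 5,virtio-net,vale0:vm1" (slot and interface names are
 * illustrative).
 */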